diff --git a/storage/2023-11-03/README.md b/storage/2023-11-03/README.md new file mode 100644 index 0000000..9263d8a --- /dev/null +++ b/storage/2023-11-03/README.md @@ -0,0 +1,31 @@ +# Storage API Version 2023-11-03 + +The following APIs are supported by this SDK - more information about each SDK can be found within the README in each package. + +## Blob Storage + +- [Blobs API](blob/blobs) +- [Containers API](blob/containers) +- [Accounts API](blob/accounts) + +## DataLakeStore Gen2 + +- [FileSystems API](datalakestore/filesystems) +- [Paths API](datalakestore/paths) + +## File Storage + +- [Directories API](file/directories) +- [Files API](file/files) +- [Shares API](file/shares) + +## Queue Storage + +- [Queues API](queue/queues) +- [Messages API](queue/messages) + +## Table Storage + +- [Entities API](table/entities) +- [Tables API](table/tables) + diff --git a/storage/2023-11-03/blob/accounts/README.md b/storage/2023-11-03/blob/accounts/README.md new file mode 100644 index 0000000..5fa24a7 --- /dev/null +++ b/storage/2023-11-03/blob/accounts/README.md @@ -0,0 +1,57 @@ +## Blob Storage Account SDK for API version 2023-11-03 + +This package allows you to interact with the Accounts Blob Storage API + +### Supported Authorizers + +* Azure Active Directory + +### Example Usage + +```go +package main + +import ( + "context" + "fmt" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/accounts" +) + +func Example() error { + accountName := "storageaccount1" + + // e.g. 
https://github.com/tombuildsstuff/giovanni/blob/76f5f686c99ecdcc3fa533a0330d0e1aacb1c327/example/azuread-auth/main.go#L54 + client, err := buildClient() + if err != nil { + return fmt.Errorf("error building client: %s", err) + } + + ctx := context.TODO() + + input := accounts.StorageServiceProperties{ + StaticWebsite: &accounts.StaticWebsite{ + Enabled: true, + IndexDocument: "index.html", + ErrorDocument404Path: "404.html", + }, + } + + _, err = client.SetServiceProperties(ctx, accountName, input) + if err != nil { + return fmt.Errorf("error setting properties: %s", err) + } + + time.Sleep(2 * time.Second) + + _, err = client.GetServiceProperties(ctx, accountName) + if err != nil { + return fmt.Errorf("error getting properties: %s", err) + } + + return nil +} + +``` \ No newline at end of file diff --git a/storage/2023-11-03/blob/accounts/client.go b/storage/2023-11-03/blob/accounts/client.go new file mode 100644 index 0000000..1a3c314 --- /dev/null +++ b/storage/2023-11-03/blob/accounts/client.go @@ -0,0 +1,22 @@ +package accounts + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/dataplane/storage" +) + +// Client is the base client for Blob Storage Accounts. 
+type Client struct { + Client *storage.BaseClient +} + +func NewWithBaseUri(baseUri string) (*Client, error) { + baseClient, err := storage.NewBaseClient(baseUri, componentName, apiVersion) + if err != nil { + return nil, fmt.Errorf("building base client: %+v", err) + } + return &Client{ + Client: baseClient, + }, nil +} diff --git a/storage/2023-11-03/blob/accounts/get_service_properties.go b/storage/2023-11-03/blob/accounts/get_service_properties.go new file mode 100644 index 0000000..01ea987 --- /dev/null +++ b/storage/2023-11-03/blob/accounts/get_service_properties.go @@ -0,0 +1,49 @@ +package accounts + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" +) + +type GetServicePropertiesResult struct { + HttpResponse *client.Response + Model *StorageServiceProperties +} + +func (c Client) GetServiceProperties(ctx context.Context, accountName string) (resp GetServicePropertiesResult, err error) { + if accountName == "" { + return resp, fmt.Errorf("`accountName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "text/xml", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: servicePropertiesOptions{}, + Path: "/", + } + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if err = resp.HttpResponse.Unmarshal(&resp.Model); err != nil { + err = fmt.Errorf("unmarshaling response: %+v", err) + return + } + } + + return +} diff --git a/storage/2023-11-03/blob/accounts/models.go b/storage/2023-11-03/blob/accounts/models.go new file mode 100644 index 0000000..c36d45e --- /dev/null +++ b/storage/2023-11-03/blob/accounts/models.go @@ -0,0 +1,74 @@ +package accounts + +type StorageServiceProperties struct { + // Cors - 
Specifies CORS rules for the Blob service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the Blob service. + Cors *CorsRules `xml:"Cors,omitempty"` + // DefaultServiceVersion - DefaultServiceVersion indicates the default version to use for requests to the Blob service if an incoming request’s version is not specified. Possible values include version 2008-10-27 and all more recent versions. + DefaultServiceVersion *string `xml:"DefaultServiceVersion,omitempty"` + // DeleteRetentionPolicy - The blob service properties for soft delete. + DeleteRetentionPolicy *DeleteRetentionPolicy `xml:"DeleteRetentionPolicy,omitempty"` + // Logging - The blob service properties for logging access + Logging *Logging `xml:"Logging,omitempty"` + // HourMetrics - The blob service properties for hour metrics + HourMetrics *MetricsConfig `xml:"HourMetrics,omitempty"` + // HourMetrics - The blob service properties for minute metrics + MinuteMetrics *MetricsConfig `xml:"MinuteMetrics,omitempty"` + // StaticWebsite - Optional + StaticWebsite *StaticWebsite `xml:"StaticWebsite,omitempty"` +} + +// StaticWebsite sets the static website support properties on the Blob service. +type StaticWebsite struct { + // Enabled - Required. Indicates whether static website support is enabled for the given account. + Enabled bool `xml:"Enabled"` + // IndexDocument - Optional. The webpage that Azure Storage serves for requests to the root of a website or any subfolder. For example, index.html. The value is case-sensitive. + IndexDocument string `xml:"IndexDocument,omitempty"` + // ErrorDocument404Path - Optional. The absolute path to a webpage that Azure Storage serves for requests that do not correspond to an existing file. For example, error/404.html. Only a single custom 404 page is supported in each static website. The value is case-sensitive. 
+ ErrorDocument404Path string `xml:"ErrorDocument404Path,omitempty"` +} + +// CorsRules sets the CORS rules. You can include up to five CorsRule elements in the request. +type CorsRules struct { + // CorsRules - The List of CORS rules. You can include up to five CorsRule elements in the request. + CorsRules []CorsRule `xml:"CorsRules,omitempty"` +} + +// DeleteRetentionPolicy the blob service properties for soft delete. +type DeleteRetentionPolicy struct { + // Enabled - Indicates whether DeleteRetentionPolicy is enabled for the Blob service. + Enabled bool `xml:"Enabled,omitempty"` + // Days - Indicates the number of days that the deleted blob should be retained. The minimum specified value can be 1 and the maximum value can be 365. + Days int32 `xml:"Days,omitempty"` +} + +// CorsRule specifies a CORS rule for the Blob service. +type CorsRule struct { + // AllowedOrigins - Required if CorsRule element is present. A list of origin domains that will be allowed via CORS, or "" to allow all domains + AllowedOrigins []string `xml:"AllowedOrigins,omitempty"` + // AllowedMethods - Required if CorsRule element is present. A list of HTTP methods that are allowed to be executed by the origin. + AllowedMethods []string `xml:"AllowedMethods,omitempty"` + // MaxAgeInSeconds - Required if CorsRule element is present. The number of seconds that the client/browser should cache a preflight response. + MaxAgeInSeconds int32 `xml:"MaxAgeInSeconds,omitempty"` + // ExposedHeaders - Required if CorsRule element is present. A list of response headers to expose to CORS clients. + ExposedHeaders []string `xml:"ExposedHeaders,omitempty"` + // AllowedHeaders - Required if CorsRule element is present. A list of headers allowed to be part of the cross-origin request. + AllowedHeaders []string `xml:"AllowedHeaders,omitempty"` +} + +// Logging specifies the access logging options for the Blob service. 
+type Logging struct { + Version string `xml:"Version"` + Delete bool `xml:"Delete"` + Read bool `xml:"Read"` + Write bool `xml:"Write"` + RetentionPolicy DeleteRetentionPolicy `xml:"RetentionPolicy"` +} + +// MetricsConfig specifies the hour and/or minute metrics options for the Blob service. +// Elements are all expected +type MetricsConfig struct { + Version string `xml:"Version"` + Enabled bool `xml:"Enabled"` + RetentionPolicy DeleteRetentionPolicy `xml:"RetentionPolicy"` + IncludeAPIs bool `xml:"IncludeAPIs"` +} diff --git a/storage/2023-11-03/blob/accounts/service_properties_shared.go b/storage/2023-11-03/blob/accounts/service_properties_shared.go new file mode 100644 index 0000000..3019155 --- /dev/null +++ b/storage/2023-11-03/blob/accounts/service_properties_shared.go @@ -0,0 +1,26 @@ +package accounts + +import ( + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +var _ client.Options = servicePropertiesOptions{} + +type servicePropertiesOptions struct { +} + +func (servicePropertiesOptions) ToHeaders() *client.Headers { + return nil +} + +func (servicePropertiesOptions) ToOData() *odata.Query { + return nil +} + +func (servicePropertiesOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "properties") + out.Append("restype", "service") + return out +} diff --git a/storage/2023-11-03/blob/accounts/set_service_properties.go b/storage/2023-11-03/blob/accounts/set_service_properties.go new file mode 100644 index 0000000..173a80f --- /dev/null +++ b/storage/2023-11-03/blob/accounts/set_service_properties.go @@ -0,0 +1,45 @@ +package accounts + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" +) + +type SetServicePropertiesResult struct { + HttpResponse *client.Response +} + +func (c Client) SetServiceProperties(ctx context.Context, accountName string, input StorageServiceProperties) (resp SetServicePropertiesResult, err 
error) { + if accountName == "" { + return resp, fmt.Errorf("`accountName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "text/xml", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodPut, + OptionsObject: servicePropertiesOptions{}, + Path: "/", + } + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + if err = req.Marshal(&input); err != nil { + err = fmt.Errorf("marshaling request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} diff --git a/storage/2023-11-03/blob/accounts/set_service_properties_test.go b/storage/2023-11-03/blob/accounts/set_service_properties_test.go new file mode 100644 index 0000000..0e642fc --- /dev/null +++ b/storage/2023-11-03/blob/accounts/set_service_properties_test.go @@ -0,0 +1,106 @@ +package accounts + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +func TestContainerLifecycle(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + + _, err = client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindStorageVTwo) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + accountsClient, err := 
NewWithBaseUri(fmt.Sprintf("https://%s.blob.%s", accountName, *domainSuffix)) + if err != nil { + t.Fatal(fmt.Errorf("building client for environment: %+v", err)) + } + client.PrepareWithResourceManagerAuth(accountsClient.Client) + + input := StorageServiceProperties{} + _, err = accountsClient.SetServiceProperties(ctx, accountName, input) + if err != nil { + t.Fatal(fmt.Errorf("error setting properties: %s", err)) + } + + var index = "index.html" + //var enabled = true + var errorDocument = "404.html" + + input = StorageServiceProperties{ + StaticWebsite: &StaticWebsite{ + Enabled: true, + IndexDocument: index, + ErrorDocument404Path: errorDocument, + }, + Logging: &Logging{ + Version: "2.0", + Delete: true, + Read: true, + Write: true, + RetentionPolicy: DeleteRetentionPolicy{ + Enabled: true, + Days: 7, + }, + }, + } + + _, err = accountsClient.SetServiceProperties(ctx, accountName, input) + if err != nil { + t.Fatal(fmt.Errorf("error setting properties: %s", err)) + } + + t.Log("[DEBUG] Waiting 2 seconds..") + time.Sleep(2 * time.Second) + + result, err := accountsClient.GetServiceProperties(ctx, accountName) + if err != nil { + t.Fatal(fmt.Errorf("error getting properties: %s", err)) + } + + website := result.Model.StaticWebsite + if website.Enabled != true { + t.Fatalf("Expected the StaticWebsite %t but got %t", true, website.Enabled) + } + + logging := result.Model.Logging + if logging.Version != "2.0" { + t.Fatalf("Expected the Logging Version %s but got %s", "2.0", logging.Version) + } + if !logging.Read { + t.Fatalf("Expected the Logging Read %t but got %t", true, logging.Read) + } + if !logging.Write { + t.Fatalf("Expected the Logging Write %t but got %t", true, logging.Write) + } + if !logging.Delete { + t.Fatalf("Expected the Logging Delete %t but got %t", true, logging.Delete) + } + if !logging.RetentionPolicy.Enabled { + t.Fatalf("Expected the Logging RetentionPolicy.Enabled %t but got %t", true, logging.RetentionPolicy.Enabled) + } + if 
logging.RetentionPolicy.Days != 7 { + t.Fatalf("Expected the Logging RetentionPolicy.Days %d but got %d", 7, logging.RetentionPolicy.Days) + } +} diff --git a/storage/2023-11-03/blob/accounts/version.go b/storage/2023-11-03/blob/accounts/version.go new file mode 100644 index 0000000..427d66d --- /dev/null +++ b/storage/2023-11-03/blob/accounts/version.go @@ -0,0 +1,4 @@ +package accounts + +const apiVersion = "2023-11-03" +const componentName = "blob/accounts" diff --git a/storage/2023-11-03/blob/blobs/README.md b/storage/2023-11-03/blob/blobs/README.md new file mode 100644 index 0000000..19218cf --- /dev/null +++ b/storage/2023-11-03/blob/blobs/README.md @@ -0,0 +1,46 @@ +## Blob Storage Blobs SDK for API version 2023-11-03 + +This package allows you to interact with the Blobs Blob Storage API + +### Supported Authorizers + +* Azure Active Directory (for the Resource Endpoint `https://storage.azure.com`) +* SharedKeyLite (Blob, File & Queue) + +### Example Usage + +```go +package main + +import ( + "context" + "fmt" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/blobs" +) + +func Example() error { + accountName := "storageaccount1" + storageAccountKey := "ABC123...." 
+ containerName := "mycontainer" + fileName := "example-large-file.iso" + + storageAuth := autorest.NewSharedKeyLiteAuthorizer(accountName, storageAccountKey) + blobClient := blobs.New() + blobClient.Client.Authorizer = storageAuth + + ctx := context.TODO() + copyInput := blobs.CopyInput{ + CopySource: "http://releases.ubuntu.com/14.04/ubuntu-14.04.6-desktop-amd64.iso", + } + refreshInterval := 5 * time.Second + if err := blobClient.CopyAndWait(ctx, accountName, containerName, fileName, copyInput, refreshInterval); err != nil { + return fmt.Errorf("Error copying: %s", err) + } + + return nil +} + +``` \ No newline at end of file diff --git a/storage/2023-11-03/blob/blobs/api.go b/storage/2023-11-03/blob/blobs/api.go new file mode 100644 index 0000000..79afd7a --- /dev/null +++ b/storage/2023-11-03/blob/blobs/api.go @@ -0,0 +1,42 @@ +package blobs + +import ( + "context" + "os" + "time" +) + +type StorageBlob interface { + AppendBlock(ctx context.Context, containerName string, blobName string, input AppendBlockInput) (AppendBlockResponse, error) + Copy(ctx context.Context, containerName string, blobName string, input CopyInput) (CopyResponse, error) + AbortCopy(ctx context.Context, containerName string, blobName string, input AbortCopyInput) (CopyAbortResponse, error) + CopyAndWait(ctx context.Context, containerName string, blobName string, input CopyInput, pollingInterval time.Duration) error + Delete(ctx context.Context, containerName string, blobName string, input DeleteInput) (DeleteResponse, error) + DeleteSnapshot(ctx context.Context, containerName string, blobName string, input DeleteSnapshotInput) (DeleteSnapshotResponse, error) + DeleteSnapshots(ctx context.Context, containerName string, blobName string, input DeleteSnapshotsInput) (DeleteSnapshotsResponse, error) + Get(ctx context.Context, containerName string, blobName string, input GetInput) (GetResponse, error) + GetBlockList(ctx context.Context, containerName string, blobName string, input 
GetBlockListInput) (GetBlockListResponse, error) + GetPageRanges(ctx context.Context, containerName, blobName string, input GetPageRangesInput) (GetPageRangesResponse, error) + IncrementalCopyBlob(ctx context.Context, containerName string, blobName string, input IncrementalCopyBlobInput) (IncrementalCopyBlob, error) + AcquireLease(ctx context.Context, containerName string, blobName string, input AcquireLeaseInput) (AcquireLeaseResponse, error) + BreakLease(ctx context.Context, containerName string, blobName string, input BreakLeaseInput) (BreakLeaseResponse, error) + ChangeLease(ctx context.Context, containerName string, blobName string, input ChangeLeaseInput) (ChangeLeaseResponse, error) + ReleaseLease(ctx context.Context, containerName string, blobName string, input ReleaseLeaseInput) (ReleaseLeaseResponse, error) + RenewLease(ctx context.Context, containerName string, blobName string, input RenewLeaseInput) (RenewLeaseResponse, error) + SetMetaData(ctx context.Context, containerName string, blobName string, input SetMetaDataInput) (SetMetaDataResponse, error) + GetProperties(ctx context.Context, containerName string, blobName string, input GetPropertiesInput) (GetPropertiesResponse, error) + SetProperties(ctx context.Context, containerName string, blobName string, input SetPropertiesInput) (SetPropertiesResponse, error) + PutAppendBlob(ctx context.Context, containerName string, blobName string, input PutAppendBlobInput) (PutAppendBlobResponse, error) + PutBlock(ctx context.Context, containerName string, blobName string, input PutBlockInput) (PutBlockResponse, error) + PutBlockBlob(ctx context.Context, containerName string, blobName string, input PutBlockBlobInput) (PutBlockBlobResponse, error) + PutBlockBlobFromFile(ctx context.Context, containerName string, blobName string, file *os.File, input PutBlockBlobInput) error + PutBlockList(ctx context.Context, containerName string, blobName string, input PutBlockListInput) (PutBlockListResponse, error) + 
PutBlockFromURL(ctx context.Context, containerName string, blobName string, input PutBlockFromURLInput) (PutBlockFromURLResponse, error) + PutPageBlob(ctx context.Context, containerName string, blobName string, input PutPageBlobInput) (PutPageBlobResponse, error) + PutPageClear(ctx context.Context, containerName string, blobName string, input PutPageClearInput) (PutPageClearResponse, error) + PutPageUpdate(ctx context.Context, containerName string, blobName string, input PutPageUpdateInput) (PutPageUpdateResponse, error) + SetTier(ctx context.Context, containerName string, blobName string, input SetTierInput) (SetTierResponse, error) + Snapshot(ctx context.Context, containerName string, blobName string, input SnapshotInput) (SnapshotResponse, error) + GetSnapshotProperties(ctx context.Context, containerName string, blobName string, input GetSnapshotPropertiesInput) (GetPropertiesResponse, error) + Undelete(ctx context.Context, containerName string, blobName string) (UndeleteResponse, error) +} diff --git a/storage/2023-11-03/blob/blobs/append_block.go b/storage/2023-11-03/blob/blobs/append_block.go new file mode 100644 index 0000000..a1068d4 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/append_block.go @@ -0,0 +1,160 @@ +package blobs + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type AppendBlockInput struct { + + // A number indicating the byte offset to compare. + // Append Block will succeed only if the append position is equal to this number. + // If it is not, the request will fail with an AppendPositionConditionNotMet + // error (HTTP status code 412 – Precondition Failed) + BlobConditionAppendPosition *int64 + + // The max length in bytes permitted for the append blob. 
+ // If the Append Block operation would cause the blob to exceed that limit or if the blob size + // is already greater than the value specified in this header, the request will fail with + // an MaxBlobSizeConditionNotMet error (HTTP status code 412 – Precondition Failed). + BlobConditionMaxSize *int64 + + // The Bytes which should be appended to the end of this Append Blob. + // This can either be nil, which creates an empty blob, or a byte array + Content *[]byte + + // An MD5 hash of the block content. + // This hash is used to verify the integrity of the block during transport. + // When this header is specified, the storage service compares the hash of the content + // that has arrived with this header value. + // + // Note that this MD5 hash is not stored with the blob. + // If the two hashes do not match, the operation will fail with error code 400 (Bad Request). + ContentMD5 *string + + // Required if the blob has an active lease. + // To perform this operation on a blob with an active lease, specify the valid lease ID for this header. + LeaseID *string +} + +type AppendBlockResponse struct { + HttpResponse *client.Response + + BlobAppendOffset string + BlobCommittedBlockCount int64 + ContentMD5 string + ETag string + LastModified string +} + +// AppendBlock commits a new block of data to the end of an existing append blob. 
+func (c Client) AppendBlock(ctx context.Context, containerName, blobName string, input AppendBlockInput) (resp AppendBlockResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if input.Content != nil && len(*input.Content) > (4*1024*1024) { + return resp, fmt.Errorf("`input.Content` must be at most 4MB") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: appendBlockOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + if input.Content != nil { + req.Body = io.NopCloser(bytes.NewReader(*input.Content)) + } + + req.ContentLength = int64(len(*input.Content)) + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Header != nil { + resp.BlobAppendOffset = resp.HttpResponse.Header.Get("x-ms-blob-append-offset") + resp.ContentMD5 = resp.HttpResponse.Header.Get("Content-MD5") + resp.ETag = resp.HttpResponse.Header.Get("ETag") + resp.LastModified = resp.HttpResponse.Header.Get("Last-Modified") + + if v := resp.HttpResponse.Header.Get("x-ms-blob-committed-block-count"); v != "" { + i, innerErr := strconv.Atoi(v) + if innerErr != nil { + err = fmt.Errorf("error parsing %q as an integer: %s", v, innerErr) + return + } + resp.BlobCommittedBlockCount = int64(i) + } + + } + } + + return +} + +type appendBlockOptions struct { + input AppendBlockInput +} + +func (a appendBlockOptions) ToHeaders() 
*client.Headers { + headers := &client.Headers{} + if a.input.BlobConditionAppendPosition != nil { + headers.Append("x-ms-blob-condition-appendpos", strconv.Itoa(int(*a.input.BlobConditionAppendPosition))) + } + if a.input.BlobConditionMaxSize != nil { + headers.Append("x-ms-blob-condition-maxsize", strconv.Itoa(int(*a.input.BlobConditionMaxSize))) + } + if a.input.ContentMD5 != nil { + headers.Append("x-ms-blob-content-md5", *a.input.ContentMD5) + } + if a.input.LeaseID != nil { + headers.Append("x-ms-lease-id", *a.input.LeaseID) + } + if a.input.Content != nil { + headers.Append("Content-Length", strconv.Itoa(len(*a.input.Content))) + } + return headers +} + +func (a appendBlockOptions) ToOData() *odata.Query { + return nil +} + +func (a appendBlockOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "appendblock") + return out +} diff --git a/storage/2023-11-03/blob/blobs/blob_append_test.go b/storage/2023-11-03/blob/blobs/blob_append_test.go new file mode 100644 index 0000000..365f6a1 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/blob_append_test.go @@ -0,0 +1,174 @@ +package blobs + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/containers" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +func TestAppendBlobLifecycle(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + containerName := fmt.Sprintf("cont-%d", testhelpers.RandomInt()) + fileName := "append-blob.txt" + + testData, err := client.BuildTestResources(ctx, 
resourceGroup, accountName, storageaccounts.KindBlobStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + + containersClient, err := containers.NewWithBaseUri(fmt.Sprintf("https://%s.blob.%s", testData.StorageAccountName, *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(containersClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + _, err = containersClient.Create(ctx, containerName, containers.CreateInput{}) + if err != nil { + t.Fatal(fmt.Errorf("Error creating: %s", err)) + } + defer containersClient.Delete(ctx, containerName) + + blobClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.blob.%s", testData.StorageAccountName, *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(blobClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + t.Logf("[DEBUG] Putting Append Blob..") + if _, err := blobClient.PutAppendBlob(ctx, containerName, fileName, PutAppendBlobInput{}); err != nil { + t.Fatalf("Error putting append blob: %s", err) + } + + t.Logf("[DEBUG] Retrieving Properties..") + props, err := blobClient.GetProperties(ctx, containerName, fileName, GetPropertiesInput{}) + if err != nil { + t.Fatalf("Error retrieving properties: %s", err) + } + if props.ContentLength != 0 { + t.Fatalf("Expected Content-Length to be 0 but it was %d", props.ContentLength) + } + + t.Logf("[DEBUG] Appending First Block..") + appendInput := AppendBlockInput{ + Content: &[]byte{ + 12, + 48, + 93, + 76, + 29, + 10, + }, + } + if _, err := blobClient.AppendBlock(ctx, containerName, 
fileName, appendInput); err != nil { + t.Fatalf("Error appending first block: %s", err) + } + + t.Logf("[DEBUG] Re-Retrieving Properties..") + props, err = blobClient.GetProperties(ctx, containerName, fileName, GetPropertiesInput{}) + if err != nil { + t.Fatalf("Error retrieving properties: %s", err) + } + if props.ContentLength != 6 { + t.Fatalf("Expected Content-Length to be 6 but it was %d", props.ContentLength) + } + + t.Logf("[DEBUG] Appending Second Block..") + appendInput = AppendBlockInput{ + Content: &[]byte{ + 92, + 62, + 64, + 47, + 83, + 77, + }, + } + if _, err := blobClient.AppendBlock(ctx, containerName, fileName, appendInput); err != nil { + t.Fatalf("Error appending Second block: %s", err) + } + + t.Logf("[DEBUG] Re-Retrieving Properties..") + props, err = blobClient.GetProperties(ctx, containerName, fileName, GetPropertiesInput{}) + if err != nil { + t.Fatalf("Error retrieving properties: %s", err) + } + if props.ContentLength != 12 { + t.Fatalf("Expected Content-Length to be 12 but it was %d", props.ContentLength) + } + + t.Logf("[DEBUG] Acquiring Lease..") + leaseDetails, err := blobClient.AcquireLease(ctx, containerName, fileName, AcquireLeaseInput{ + LeaseDuration: -1, + }) + if err != nil { + t.Fatalf("Error acquiring Lease: %s", err) + } + t.Logf("[DEBUG] Lease ID is %q", leaseDetails.LeaseID) + + t.Logf("[DEBUG] Appending Third Block..") + appendInput = AppendBlockInput{ + Content: &[]byte{ + 64, + 35, + 28, + 93, + 11, + 23, + }, + LeaseID: &leaseDetails.LeaseID, + } + if _, err := blobClient.AppendBlock(ctx, containerName, fileName, appendInput); err != nil { + t.Fatalf("Error appending Third block: %s", err) + } + + t.Logf("[DEBUG] Re-Retrieving Properties..") + props, err = blobClient.GetProperties(ctx, containerName, fileName, GetPropertiesInput{ + LeaseID: &leaseDetails.LeaseID, + }) + if err != nil { + t.Fatalf("Error retrieving properties: %s", err) + } + if props.ContentLength != 18 { + t.Fatalf("Expected Content-Length to be 18 
but it was %d", props.ContentLength) + } + + t.Logf("[DEBUG] Breaking Lease..") + breakLeaseInput := BreakLeaseInput{ + LeaseID: leaseDetails.LeaseID, + } + if _, err := blobClient.BreakLease(ctx, containerName, fileName, breakLeaseInput); err != nil { + t.Fatalf("Error breaking lease: %s", err) + } + + t.Logf("[DEBUG] Deleting Lease..") + if _, err := blobClient.Delete(ctx, containerName, fileName, DeleteInput{}); err != nil { + t.Fatalf("Error deleting: %s", err) + } +} diff --git a/storage/2023-11-03/blob/blobs/blob_page_test.go b/storage/2023-11-03/blob/blobs/blob_page_test.go new file mode 100644 index 0000000..e812dc4 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/blob_page_test.go @@ -0,0 +1,108 @@ +package blobs + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/containers" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +func TestPageBlobLifecycle(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + containerName := fmt.Sprintf("cont-%d", testhelpers.RandomInt()) + fileName := "append-blob.txt" + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindStorageVTwo) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + + containersClient, err := containers.NewWithBaseUri(fmt.Sprintf("https://%s.blob.%s", 
testData.StorageAccountName, *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(containersClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + _, err = containersClient.Create(ctx, containerName, containers.CreateInput{}) + if err != nil { + t.Fatal(fmt.Errorf("Error creating: %s", err)) + } + defer containersClient.Delete(ctx, containerName) + + blobClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.blob.%s", testData.StorageAccountName, *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(blobClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + t.Logf("[DEBUG] Putting Page Blob..") + fileSize := int64(10240000) + if _, err := blobClient.PutPageBlob(ctx, containerName, fileName, PutPageBlobInput{ + BlobContentLengthBytes: fileSize, + }); err != nil { + t.Fatalf("Error putting page blob: %s", err) + } + + t.Logf("[DEBUG] Retrieving Properties..") + props, err := blobClient.GetProperties(ctx, containerName, fileName, GetPropertiesInput{}) + if err != nil { + t.Fatalf("Error retrieving properties: %s", err) + } + if props.ContentLength != fileSize { + t.Fatalf("Expected Content-Length to be %d but it was %d", fileSize, props.ContentLength) + } + + for iteration := 1; iteration <= 3; iteration++ { + t.Logf("[DEBUG] Putting Page %d of 3..", iteration) + byteArray := func() []byte { + o := make([]byte, 0) + + for i := 0; i < 512; i++ { + o = append(o, byte(i)) + } + + return o + }() + startByte := int64(512 * iteration) + endByte := int64(startByte + 511) + putPageInput := PutPageUpdateInput{ + StartByte: startByte, + EndByte: endByte, + Content: byteArray, + } + if _, err := blobClient.PutPageUpdate(ctx, containerName, fileName, putPageInput); err != nil { + 
t.Fatalf("Error putting page: %s", err) + } + } + + t.Logf("[DEBUG] Deleting..") + if _, err := blobClient.Delete(ctx, containerName, fileName, DeleteInput{}); err != nil { + t.Fatalf("Error deleting: %s", err) + } +} diff --git a/storage/2023-11-03/blob/blobs/client.go b/storage/2023-11-03/blob/blobs/client.go new file mode 100644 index 0000000..8cb42d8 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/client.go @@ -0,0 +1,22 @@ +package blobs + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/dataplane/storage" +) + +// Client is the base client for Blob Storage Blobs. +type Client struct { + Client *storage.BaseClient +} + +func NewWithBaseUri(baseUri string) (*Client, error) { + baseClient, err := storage.NewBaseClient(baseUri, componentName, apiVersion) + if err != nil { + return nil, fmt.Errorf("building base client: %+v", err) + } + return &Client{ + Client: baseClient, + }, nil +} diff --git a/storage/2023-11-03/blob/blobs/copy.go b/storage/2023-11-03/blob/blobs/copy.go new file mode 100644 index 0000000..2ff309c --- /dev/null +++ b/storage/2023-11-03/blob/blobs/copy.go @@ -0,0 +1,230 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type CopyInput struct { + // Specifies the name of the source blob or file. + // Beginning with version 2012-02-12, this value may be a URL of up to 2 KB in length that specifies a blob. + // The value should be URL-encoded as it would appear in a request URI. + // A source blob in the same storage account can be authenticated via Shared Key. + // However, if the source is a blob in another account, + // the source blob must either be public or must be authenticated via a shared access signature. + // If the source blob is public, no authentication is required to perform the copy operation. 
+ // + // Beginning with version 2015-02-21, the source object may be a file in the Azure File service. + // If the source object is a file that is to be copied to a blob, then the source file must be authenticated + // using a shared access signature, whether it resides in the same account or in a different account. + // + // Only storage accounts created on or after June 7th, 2012 allow the Copy Blob operation to + // copy from another storage account. + CopySource string + + // The ID of the Lease + // Required if the destination blob has an active lease. + // The lease ID specified for this header must match the lease ID of the destination blob. + // If the request does not include the lease ID or it is not valid, + // the operation fails with status code 412 (Precondition Failed). + // + // If this header is specified and the destination blob does not currently have an active lease, + // the operation will also fail with status code 412 (Precondition Failed). + LeaseID *string + + // The ID of the Lease on the Source Blob + // Specify to perform the Copy Blob operation only if the lease ID matches the active lease ID of the source blob. + SourceLeaseID *string + + // For page blobs on a premium account only. Specifies the tier to be set on the target blob + AccessTier *AccessTier + + // A user-defined name-value pair associated with the blob. + // If no name-value pairs are specified, the operation will copy the metadata from the source blob or + // file to the destination blob. + // If one or more name-value pairs are specified, the destination blob is created with the specified metadata, + // and metadata is not copied from the source blob or file. + MetaData map[string]string + + // An ETag value. + // Specify an ETag value for this conditional header to copy the blob only if the specified + // ETag value matches the ETag value for an existing destination blob. 
+ // If the ETag for the destination blob does not match the ETag specified for If-Match, + // the Blob service returns status code 412 (Precondition Failed). + IfMatch *string + + // An ETag value, or the wildcard character (*). + // Specify an ETag value for this conditional header to copy the blob only if the specified + // ETag value does not match the ETag value for the destination blob. + // Specify the wildcard character (*) to perform the operation only if the destination blob does not exist. + // If the specified condition isn't met, the Blob service returns status code 412 (Precondition Failed). + IfNoneMatch *string + + // A DateTime value. + // Specify this conditional header to copy the blob only if the destination blob + // has been modified since the specified date/time. + // If the destination blob has not been modified, the Blob service returns status code 412 (Precondition Failed). + IfModifiedSince *string + + // A DateTime value. + // Specify this conditional header to copy the blob only if the destination blob + // has not been modified since the specified date/time. + // If the destination blob has been modified, the Blob service returns status code 412 (Precondition Failed). + IfUnmodifiedSince *string + + // An ETag value. + // Specify this conditional header to copy the source blob only if its ETag matches the value specified. + // If the ETag values do not match, the Blob service returns status code 412 (Precondition Failed). + // This cannot be specified if the source is an Azure File. + SourceIfMatch *string + + // An ETag value. + // Specify this conditional header to copy the blob only if its ETag does not match the value specified. + // If the values are identical, the Blob service returns status code 412 (Precondition Failed). + // This cannot be specified if the source is an Azure File. + SourceIfNoneMatch *string + + // A DateTime value. 
+ // Specify this conditional header to copy the blob only if the source blob has been modified + // since the specified date/time. + // If the source blob has not been modified, the Blob service returns status code 412 (Precondition Failed). + // This cannot be specified if the source is an Azure File. + SourceIfModifiedSince *string + + // A DateTime value. + // Specify this conditional header to copy the blob only if the source blob has not been modified + // since the specified date/time. + // If the source blob has been modified, the Blob service returns status code 412 (Precondition Failed). + // This header cannot be specified if the source is an Azure File. + SourceIfUnmodifiedSince *string +} + +type CopyResponse struct { + HttpResponse *client.Response + + CopyID string + CopyStatus string +} + +// Copy copies a blob to a destination within the storage account asynchronously. +func (c Client) Copy(ctx context.Context, containerName, blobName string, input CopyInput) (resp CopyResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if input.CopySource == "" { + return resp, fmt.Errorf("`input.CopySource` cannot be an empty string") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodPut, + OptionsObject: copyOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if 
resp.HttpResponse.Header != nil { + resp.CopyID = resp.HttpResponse.Header.Get("x-ms-copy-id") + resp.CopyStatus = resp.HttpResponse.Header.Get("x-ms-copy-status") + } + } + + return +} + +type copyOptions struct { + input CopyInput +} + +func (c copyOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("x-ms-copy-source", c.input.CopySource) + + if c.input.LeaseID != nil { + headers.Append("x-ms-lease-id", *c.input.LeaseID) + } + + if c.input.SourceLeaseID != nil { + headers.Append("x-ms-source-lease-id", *c.input.SourceLeaseID) + } + + if c.input.AccessTier != nil { + headers.Append("x-ms-access-tier", string(*c.input.AccessTier)) + } + + if c.input.IfMatch != nil { + headers.Append("If-Match", *c.input.IfMatch) + } + + if c.input.IfNoneMatch != nil { + headers.Append("If-None-Match", *c.input.IfNoneMatch) + } + + if c.input.IfUnmodifiedSince != nil { + headers.Append("If-Unmodified-Since", *c.input.IfUnmodifiedSince) + } + + if c.input.IfModifiedSince != nil { + headers.Append("If-Modified-Since", *c.input.IfModifiedSince) + } + + if c.input.SourceIfMatch != nil { + headers.Append("x-ms-source-if-match", *c.input.SourceIfMatch) + } + + if c.input.SourceIfNoneMatch != nil { + headers.Append("x-ms-source-if-none-match", *c.input.SourceIfNoneMatch) + } + + if c.input.SourceIfModifiedSince != nil { + headers.Append("x-ms-source-if-modified-since", *c.input.SourceIfModifiedSince) + } + + if c.input.SourceIfUnmodifiedSince != nil { + headers.Append("x-ms-source-if-unmodified-since", *c.input.SourceIfUnmodifiedSince) + } + + headers.Merge(metadata.SetMetaDataHeaders(c.input.MetaData)) + + return headers +} + +func (c copyOptions) ToOData() *odata.Query { + return nil +} + +func (c copyOptions) ToQuery() *client.QueryParams { + return nil +} diff --git a/storage/2023-11-03/blob/blobs/copy_abort.go b/storage/2023-11-03/blob/blobs/copy_abort.go new file mode 100644 index 0000000..064c56f --- /dev/null +++ 
b/storage/2023-11-03/blob/blobs/copy_abort.go @@ -0,0 +1,94 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type AbortCopyInput struct { + // The Copy ID which should be aborted + CopyID string + + // The ID of the Lease + // This must be specified if a Lease is present on the Blob, else a 403 is returned + LeaseID *string +} + +type CopyAbortResponse struct { + HttpResponse *client.Response +} + +// AbortCopy aborts a pending Copy Blob operation, and leaves a destination blob with zero length and full metadata. +func (c Client) AbortCopy(ctx context.Context, containerName, blobName string, input AbortCopyInput) (resp CopyAbortResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if input.CopyID == "" { + return resp, fmt.Errorf("`input.CopyID` cannot be an empty string") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusNoContent, + }, + HttpMethod: http.MethodPut, + OptionsObject: copyAbortOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type copyAbortOptions struct { + input AbortCopyInput +} + +func (c copyAbortOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("x-ms-copy-action", "abort") + if c.input.LeaseID != nil { + headers.Append("x-ms-lease-id", 
*c.input.LeaseID) + } + + return headers +} + +func (c copyAbortOptions) ToOData() *odata.Query { + return nil +} + +func (c copyAbortOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "copy") + out.Append("copyid", c.input.CopyID) + return out +} diff --git a/storage/2023-11-03/blob/blobs/copy_and_wait.go b/storage/2023-11-03/blob/blobs/copy_and_wait.go new file mode 100644 index 0000000..5e2e349 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/copy_and_wait.go @@ -0,0 +1,41 @@ +package blobs + +import ( + "context" + "fmt" + "time" +) + +// CopyAndWait copies a blob to a destination within the storage account and waits for it to finish copying. +func (c Client) CopyAndWait(ctx context.Context, containerName, blobName string, input CopyInput, pollingInterval time.Duration) error { + if _, err := c.Copy(ctx, containerName, blobName, input); err != nil { + return fmt.Errorf("error copying: %s", err) + } + + for true { + getInput := GetPropertiesInput{ + LeaseID: input.LeaseID, + } + getResult, err := c.GetProperties(ctx, containerName, blobName, getInput) + if err != nil { + return fmt.Errorf("") + } + + switch getResult.CopyStatus { + case Aborted: + return fmt.Errorf("Copy was aborted: %s", getResult.CopyStatusDescription) + + case Failed: + return fmt.Errorf("Copy failed: %s", getResult.CopyStatusDescription) + + case Success: + return nil + + case Pending: + time.Sleep(pollingInterval) + continue + } + } + + return fmt.Errorf("unexpected error waiting for the copy to complete") +} diff --git a/storage/2023-11-03/blob/blobs/copy_test.go b/storage/2023-11-03/blob/blobs/copy_test.go new file mode 100644 index 0000000..f09f8e6 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/copy_test.go @@ -0,0 +1,184 @@ +package blobs + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/auth" + 
"github.com/tombuildsstuff/giovanni/storage/2020-08-04/blob/containers" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +func TestCopyFromExistingFile(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + containerName := fmt.Sprintf("cont-%d", testhelpers.RandomInt()) + fileName := "ubuntu.iso" + copiedFileName := "copied.iso" + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindBlobStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + + containersClient, err := containers.NewWithBaseUri(fmt.Sprintf("https://%s.blob.%s", testData.StorageAccountName, *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(containersClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + _, err = containersClient.Create(ctx, containerName, containers.CreateInput{}) + if err != nil { + t.Fatal(fmt.Errorf("Error creating: %s", err)) + } + defer containersClient.Delete(ctx, containerName) + + baseUri := fmt.Sprintf("https://%s.blob.%s", testData.StorageAccountName, *domainSuffix) + blobClient, err := NewWithBaseUri(baseUri) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(blobClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + t.Logf("[DEBUG] Copying 
file to Blob Storage..") + copyInput := CopyInput{ + CopySource: "http://releases.ubuntu.com/14.04/ubuntu-14.04.6-desktop-amd64.iso", + } + + refreshInterval := 5 * time.Second + if err := blobClient.CopyAndWait(ctx, containerName, fileName, copyInput, refreshInterval); err != nil { + t.Fatalf("Error copying: %s", err) + } + + t.Logf("[DEBUG] Duplicating that file..") + copiedInput := CopyInput{ + CopySource: fmt.Sprintf("%s/%s/%s", baseUri, containerName, fileName), + } + if err := blobClient.CopyAndWait(ctx, containerName, copiedFileName, copiedInput, refreshInterval); err != nil { + t.Fatalf("Error duplicating file: %s", err) + } + + t.Logf("[DEBUG] Retrieving Properties for the Original File..") + props, err := blobClient.GetProperties(ctx, containerName, fileName, GetPropertiesInput{}) + if err != nil { + t.Fatalf("Error getting properties for the original file: %s", err) + } + + t.Logf("[DEBUG] Retrieving Properties for the Copied File..") + copiedProps, err := blobClient.GetProperties(ctx, containerName, copiedFileName, GetPropertiesInput{}) + if err != nil { + t.Fatalf("Error getting properties for the copied file: %s", err) + } + + if props.ContentLength != copiedProps.ContentLength { + t.Fatalf("Expected the content length to be %d but it was %d", props.ContentLength, copiedProps.ContentLength) + } + + t.Logf("[DEBUG] Deleting copied file..") + if _, err := blobClient.Delete(ctx, containerName, copiedFileName, DeleteInput{}); err != nil { + t.Fatalf("Error deleting file: %s", err) + } + + t.Logf("[DEBUG] Deleting original file..") + if _, err := blobClient.Delete(ctx, containerName, fileName, DeleteInput{}); err != nil { + t.Fatalf("Error deleting file: %s", err) + } +} + +func TestCopyFromURL(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + 
accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + containerName := fmt.Sprintf("cont-%d", testhelpers.RandomInt()) + fileName := "ubuntu.iso" + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindBlobStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + + containersClient, err := containers.NewWithBaseUri(fmt.Sprintf("https://%s.blob.%s", testData.StorageAccountName, *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(containersClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + _, err = containersClient.Create(ctx, containerName, containers.CreateInput{}) + if err != nil { + t.Fatal(fmt.Errorf("Error creating: %s", err)) + } + defer containersClient.Delete(ctx, containerName) + + blobClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.blob.%s", testData.StorageAccountName, *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(blobClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + t.Logf("[DEBUG] Copying file to Blob Storage..") + copyInput := CopyInput{ + CopySource: "http://releases.ubuntu.com/14.04/ubuntu-14.04.6-desktop-amd64.iso", + } + + refreshInterval := 5 * time.Second + if err := blobClient.CopyAndWait(ctx, containerName, fileName, copyInput, refreshInterval); err != nil { + t.Fatalf("Error copying: %s", err) + } + + t.Logf("[DEBUG] Retrieving Properties..") + props, err := blobClient.GetProperties(ctx, containerName, fileName, GetPropertiesInput{}) + if err != nil { + 
t.Fatalf("Error getting properties: %s", err) + } + + if props.ContentLength == 0 { + t.Fatalf("Expected the file to be there but looks like it isn't: %d", props.ContentLength) + } + + t.Logf("[DEBUG] Deleting file..") + if _, err := blobClient.Delete(ctx, containerName, fileName, DeleteInput{}); err != nil { + t.Fatalf("Error deleting file: %s", err) + } +} diff --git a/storage/2023-11-03/blob/blobs/delete.go b/storage/2023-11-03/blob/blobs/delete.go new file mode 100644 index 0000000..e13aaa5 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/delete.go @@ -0,0 +1,92 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type DeleteInput struct { + // Should any Snapshots for this Blob also be deleted? + // If the Blob has Snapshots and this is set to False a 409 Conflict will be returned + DeleteSnapshots bool + + // The ID of the Lease + // This must be specified if a Lease is present on the Blob, else a 403 is returned + LeaseID *string +} + +type DeleteResponse struct { + HttpResponse *client.Response +} + +// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. 
+func (c Client) Delete(ctx context.Context, containerName, blobName string, input DeleteInput) (resp DeleteResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodDelete, + OptionsObject: deleteOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type deleteOptions struct { + input DeleteInput +} + +func (d deleteOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + + if d.input.LeaseID != nil { + headers.Append("x-ms-lease-id", *d.input.LeaseID) + } + + if d.input.DeleteSnapshots { + headers.Append("x-ms-delete-snapshots", "include") + } + + return headers +} + +func (d deleteOptions) ToOData() *odata.Query { + return nil +} + +func (d deleteOptions) ToQuery() *client.QueryParams { + return nil +} diff --git a/storage/2023-11-03/blob/blobs/delete_snapshot.go b/storage/2023-11-03/blob/blobs/delete_snapshot.go new file mode 100644 index 0000000..0407eb1 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/delete_snapshot.go @@ -0,0 +1,92 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type DeleteSnapshotInput struct { + // The ID of the Lease + // This must be specified if a Lease is present on the Blob, else 
a 403 is returned + LeaseID *string + + // The DateTime of the Snapshot which should be marked for Deletion + SnapshotDateTime string +} + +type DeleteSnapshotResponse struct { + HttpResponse *client.Response +} + +// DeleteSnapshot marks a single Snapshot of a Blob for Deletion based on it's DateTime, which will be deleted during the next Garbage Collection cycle. +func (c Client) DeleteSnapshot(ctx context.Context, containerName, blobName string, input DeleteSnapshotInput) (resp DeleteSnapshotResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if input.SnapshotDateTime == "" { + return resp, fmt.Errorf("`input.SnapshotDateTime` cannot be an empty string") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodDelete, + OptionsObject: deleteSnapshotOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type deleteSnapshotOptions struct { + input DeleteSnapshotInput +} + +func (d deleteSnapshotOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + if d.input.LeaseID != nil { + headers.Append("x-ms-lease-id", *d.input.LeaseID) + } + + return headers +} + +func (d deleteSnapshotOptions) ToOData() *odata.Query { + return nil +} + +func (d deleteSnapshotOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("snapshot", d.input.SnapshotDateTime) + return 
out +} diff --git a/storage/2023-11-03/blob/blobs/delete_snapshots.go b/storage/2023-11-03/blob/blobs/delete_snapshots.go new file mode 100644 index 0000000..34b3beb --- /dev/null +++ b/storage/2023-11-03/blob/blobs/delete_snapshots.go @@ -0,0 +1,84 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type DeleteSnapshotsInput struct { + // The ID of the Lease + // This must be specified if a Lease is present on the Blob, else a 403 is returned + LeaseID *string +} + +type DeleteSnapshotsResponse struct { + HttpResponse *client.Response +} + +// DeleteSnapshots marks all Snapshots of a Blob for Deletion, which will be deleted during the next Garbage Collection Cycle. +func (c Client) DeleteSnapshots(ctx context.Context, containerName, blobName string, input DeleteSnapshotsInput) (resp DeleteSnapshotsResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodDelete, + OptionsObject: deleteSnapshotsOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type deleteSnapshotsOptions struct { + input DeleteSnapshotsInput +} + +func (d deleteSnapshotsOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + 
headers.Append("x-ms-delete-snapshots", "only") + + if d.input.LeaseID != nil { + headers.Append("x-ms-lease-id", *d.input.LeaseID) + } + return headers +} + +func (d deleteSnapshotsOptions) ToOData() *odata.Query { + return nil +} + +func (d deleteSnapshotsOptions) ToQuery() *client.QueryParams { + return nil +} diff --git a/storage/2023-11-03/blob/blobs/get.go b/storage/2023-11-03/blob/blobs/get.go new file mode 100644 index 0000000..0f5572a --- /dev/null +++ b/storage/2023-11-03/blob/blobs/get.go @@ -0,0 +1,101 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type GetInput struct { + LeaseID *string + StartByte *int64 + EndByte *int64 +} + +type GetResponse struct { + HttpResponse *client.Response + + Contents []byte +} + +// Get reads or downloads a blob from the system, including its metadata and properties. +func (c Client) Get(ctx context.Context, containerName, blobName string, input GetInput) (resp GetResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if input.LeaseID != nil && *input.LeaseID == "" { + return resp, fmt.Errorf("`input.LeaseID` should either be specified or nil, not an empty string") + } + + if (input.StartByte != nil && input.EndByte == nil) || input.StartByte == nil && input.EndByte != nil { + return resp, fmt.Errorf("`input.StartByte` and `input.EndByte` must both be specified, or both be nil") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusOK, + http.StatusPartialContent, + }, + HttpMethod: http.MethodGet, + OptionsObject: getOptions{ + input: input, + }, + Path: 
fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + err = resp.HttpResponse.Unmarshal(&resp.Contents) + if err != nil { + return resp, fmt.Errorf("unmarshalling response: %v", err) + } + } + + return +} + +type getOptions struct { + input GetInput +} + +func (g getOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + if g.input.StartByte != nil && g.input.EndByte != nil { + headers.Append("x-ms-range", fmt.Sprintf("bytes=%d-%d", *g.input.StartByte, *g.input.EndByte)) + } + return headers + +} + +func (g getOptions) ToOData() *odata.Query { + return nil +} + +func (g getOptions) ToQuery() *client.QueryParams { + return nil +} diff --git a/storage/2023-11-03/blob/blobs/get_block_list.go b/storage/2023-11-03/blob/blobs/get_block_list.go new file mode 100644 index 0000000..9a783cb --- /dev/null +++ b/storage/2023-11-03/blob/blobs/get_block_list.go @@ -0,0 +1,122 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type GetBlockListInput struct { + BlockListType BlockListType + LeaseID *string +} + +type GetBlockListResponse struct { + HttpResponse *client.Response + + // The size of the blob in bytes + BlobContentLength *int64 + + // The Content Type of the blob + ContentType string + + // The ETag associated with this blob + ETag string + + // A list of blocks which have been committed + CommittedBlocks CommittedBlocks `xml:"CommittedBlocks,omitempty"` + + // A list of blocks which have not yet been committed + UncommittedBlocks UncommittedBlocks `xml:"UncommittedBlocks,omitempty"` +} + +// GetBlockList retrieves the list of 
blocks that have been uploaded as part of a block blob. +func (c Client) GetBlockList(ctx context.Context, containerName, blobName string, input GetBlockListInput) (resp GetBlockListResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: getBlockListOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Header != nil { + resp.ContentType = resp.HttpResponse.Header.Get("Content-Type") + resp.ETag = resp.HttpResponse.Header.Get("ETag") + + if v := resp.HttpResponse.Header.Get("x-ms-blob-content-length"); v != "" { + i, innerErr := strconv.Atoi(v) + if innerErr != nil { + err = fmt.Errorf("error parsing %q as an integer: %s", v, innerErr) + return + } + + i64 := int64(i) + resp.BlobContentLength = &i64 + } + } + err = resp.HttpResponse.Unmarshal(&resp) + if err != nil { + return resp, fmt.Errorf("unmarshalling response: %v", err) + } + } + + return +} + +type getBlockListOptions struct { + input GetBlockListInput +} + +func (g getBlockListOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + if g.input.LeaseID != nil { + headers.Append("x-ms-lease-id", *g.input.LeaseID) + } + return headers +} + +func (g getBlockListOptions) ToOData() *odata.Query { + return nil +} + +func (g 
getBlockListOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("blocklisttype", string(g.input.BlockListType)) + out.Append("comp", "blocklist") + return out +} diff --git a/storage/2023-11-03/blob/blobs/get_page_ranges.go b/storage/2023-11-03/blob/blobs/get_page_ranges.go new file mode 100644 index 0000000..6d0f541 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/get_page_ranges.go @@ -0,0 +1,138 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type GetPageRangesInput struct { + LeaseID *string + + StartByte *int64 + EndByte *int64 +} + +type GetPageRangesResponse struct { + HttpResponse *client.Response + + // The size of the blob in bytes + ContentLength *int64 + + // The Content Type of the blob + ContentType string + + // The ETag associated with this blob + ETag string + + PageRanges []PageRange `xml:"PageRange"` +} + +type PageRange struct { + // The start byte offset for this range, inclusive + Start int64 `xml:"Start"` + + // The end byte offset for this range, inclusive + End int64 `xml:"End"` +} + +// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob. 
+func (c Client) GetPageRanges(ctx context.Context, containerName, blobName string, input GetPageRangesInput) (resp GetPageRangesResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if (input.StartByte != nil && input.EndByte == nil) || input.StartByte == nil && input.EndByte != nil { + return resp, fmt.Errorf("`input.StartByte` and `input.EndByte` must both be specified, or both be nil") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: getPageRangesOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Header != nil { + resp.ContentType = resp.HttpResponse.Header.Get("Content-Type") + resp.ETag = resp.HttpResponse.Header.Get("ETag") + + if v := resp.HttpResponse.Header.Get("x-ms-blob-content-length"); v != "" { + i, innerErr := strconv.Atoi(v) + if innerErr != nil { + err = fmt.Errorf("Error parsing %q as an integer: %s", v, innerErr) + return + } + + i64 := int64(i) + resp.ContentLength = &i64 + } + } + + err = resp.HttpResponse.Unmarshal(&resp) + if err != nil { + return resp, fmt.Errorf("unmarshalling response: %s", err) + } + } + + return +} + +type getPageRangesOptions struct { + input GetPageRangesInput +} + +func (g getPageRangesOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + + if g.input.LeaseID != nil { 
+ headers.Append("x-ms-lease-id", *g.input.LeaseID) + } + + if g.input.StartByte != nil && g.input.EndByte != nil { + headers.Append("x-ms-range", fmt.Sprintf("bytes=%d-%d", *g.input.StartByte, *g.input.EndByte)) + } + + return headers +} + +func (g getPageRangesOptions) ToOData() *odata.Query { + return nil +} + +func (g getPageRangesOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "pagelist") + return out +} diff --git a/storage/2023-11-03/blob/blobs/incremental_copy_blob.go b/storage/2023-11-03/blob/blobs/incremental_copy_blob.go new file mode 100644 index 0000000..d059f11 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/incremental_copy_blob.go @@ -0,0 +1,105 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type IncrementalCopyBlobInput struct { + CopySource string + IfModifiedSince *string + IfUnmodifiedSince *string + IfMatch *string + IfNoneMatch *string +} + +type IncrementalCopyBlob struct { + HttpResponse *client.Response +} + +// IncrementalCopyBlob copies a snapshot of the source page blob to a destination page blob. +// The snapshot is copied such that only the differential changes between the previously copied +// snapshot are transferred to the destination. +// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. 
+func (c Client) IncrementalCopyBlob(ctx context.Context, containerName, blobName string, input IncrementalCopyBlobInput) (resp IncrementalCopyBlob, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if input.CopySource == "" { + return resp, fmt.Errorf("`input.CopySource` cannot be an empty string") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodPut, + OptionsObject: incrementalCopyBlobOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type incrementalCopyBlobOptions struct { + input IncrementalCopyBlobInput +} + +func (i incrementalCopyBlobOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + + headers.Append("x-ms-copy-source", i.input.CopySource) + + if i.input.IfModifiedSince != nil { + headers.Append("If-Modified-Since", *i.input.IfModifiedSince) + } + if i.input.IfUnmodifiedSince != nil { + headers.Append("If-Unmodified-Since", *i.input.IfUnmodifiedSince) + } + if i.input.IfMatch != nil { + headers.Append("If-Match", *i.input.IfMatch) + } + if i.input.IfNoneMatch != nil { + headers.Append("If-None-Match", *i.input.IfNoneMatch) + } + return headers +} + +func (i incrementalCopyBlobOptions) ToOData() *odata.Query { + return nil +} + +func (i incrementalCopyBlobOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "incrementalcopy") + 
return out +} diff --git a/storage/2023-11-03/blob/blobs/lease_acquire.go b/storage/2023-11-03/blob/blobs/lease_acquire.go new file mode 100644 index 0000000..214808c --- /dev/null +++ b/storage/2023-11-03/blob/blobs/lease_acquire.go @@ -0,0 +1,120 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type AcquireLeaseInput struct { + // The ID of the existing Lease, if leased + LeaseID *string + + // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. + // A non-infinite lease can be between 15 and 60 seconds + LeaseDuration int + + // The Proposed new ID for the Lease + ProposedLeaseID *string +} + +type AcquireLeaseResponse struct { + HttpResponse *client.Response + + LeaseID string +} + +// AcquireLease establishes and manages a lock on a blob for write and delete operations. +func (c Client) AcquireLease(ctx context.Context, containerName, blobName string, input AcquireLeaseInput) (resp AcquireLeaseResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if input.LeaseID != nil && *input.LeaseID == "" { + return resp, fmt.Errorf("`input.LeaseID` cannot be an empty string, if specified") + } + + if input.ProposedLeaseID != nil && *input.ProposedLeaseID == "" { + return resp, fmt.Errorf("`input.ProposedLeaseID` cannot be an empty string, if specified") + } + // An infinite lease duration is -1 seconds. 
A non-infinite lease can be between 15 and 60 seconds
+	// NOTE: the boundaries 15 and 60 are valid per the Lease Blob API, so only reject values strictly outside them
+	if input.LeaseDuration != -1 && (input.LeaseDuration < 15 || input.LeaseDuration > 60) {
+		return resp, fmt.Errorf("`input.LeaseDuration` must be -1 (infinite), or between 15 and 60 seconds")
+	}
+
+	opts := client.RequestOptions{
+		ExpectedStatusCodes: []int{
+			http.StatusCreated,
+		},
+		HttpMethod: http.MethodPut,
+		OptionsObject: acquireLeaseOptions{
+			input: input,
+		},
+		Path: fmt.Sprintf("/%s/%s", containerName, blobName),
+	}
+
+	req, err := c.Client.NewRequest(ctx, opts)
+	if err != nil {
+		err = fmt.Errorf("building request: %+v", err)
+		return
+	}
+
+	resp.HttpResponse, err = req.Execute(ctx)
+	if err != nil {
+		err = fmt.Errorf("executing request: %+v", err)
+		return
+	}
+
+	if resp.HttpResponse != nil {
+		if resp.HttpResponse.Header != nil {
+			resp.LeaseID = resp.HttpResponse.Header.Get("x-ms-lease-id")
+		}
+	}
+
+	return
+}
+
+type acquireLeaseOptions struct {
+	input AcquireLeaseInput
+}
+
+func (a acquireLeaseOptions) ToHeaders() *client.Headers {
+	headers := &client.Headers{}
+
+	headers.Append("x-ms-lease-action", "acquire")
+	headers.Append("x-ms-lease-duration", strconv.Itoa(a.input.LeaseDuration))
+
+	if a.input.LeaseID != nil {
+		headers.Append("x-ms-lease-id", *a.input.LeaseID)
+	}
+
+	if a.input.ProposedLeaseID != nil {
+		headers.Append("x-ms-proposed-lease-id", *a.input.ProposedLeaseID)
+	}
+
+	return headers
+}
+
+func (a acquireLeaseOptions) ToOData() *odata.Query {
+	return nil
+}
+
+func (a acquireLeaseOptions) ToQuery() *client.QueryParams {
+	out := &client.QueryParams{}
+	out.Append("comp", "lease")
+	return out
+}
diff --git a/storage/2023-11-03/blob/blobs/lease_break.go b/storage/2023-11-03/blob/blobs/lease_break.go
new file mode 100644
index 0000000..858c306
--- /dev/null
+++ b/storage/2023-11-03/blob/blobs/lease_break.go
@@ -0,0 +1,118 @@
+package blobs
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/go-azure-sdk/sdk/client"
+	"github.com/hashicorp/go-azure-sdk/sdk/odata"
+)
+
+type BreakLeaseInput struct {
+	// For a break operation, proposed duration the lease should continue
+	// before it is broken, in seconds, between 0 and 60.
+	// This break period is only used if it is shorter than the time remaining on the lease.
+	// If longer, the time remaining on the lease is used.
+	// A new lease will not be available before the break period has expired,
+	// but the lease may be held for longer than the break period.
+	// If this header does not appear with a break operation, a fixed-duration lease breaks
+	// after the remaining lease period elapses, and an infinite lease breaks immediately.
+	BreakPeriod *int
+
+	LeaseID string
+}
+
+type BreakLeaseResponse struct {
+	HttpResponse *client.Response
+
+	// Approximate time remaining in the lease period, in seconds.
+	// If the break is immediate, 0 is returned.
+	LeaseTime int
+}
+
+// BreakLease breaks an existing lock on a blob using the LeaseID.
+func (c Client) BreakLease(ctx context.Context, containerName, blobName string, input BreakLeaseInput) (resp BreakLeaseResponse, err error) {
+
+	if containerName == "" {
+		return resp, fmt.Errorf("`containerName` cannot be an empty string")
+	}
+
+	if strings.ToLower(containerName) != containerName {
+		return resp, fmt.Errorf("`containerName` must be a lower-cased string")
+	}
+
+	if blobName == "" {
+		return resp, fmt.Errorf("`blobName` cannot be an empty string")
+	}
+
+	if input.LeaseID == "" {
+		return resp, fmt.Errorf("`input.LeaseID` cannot be an empty string")
+	}
+
+	opts := client.RequestOptions{
+		ExpectedStatusCodes: []int{
+			http.StatusAccepted,
+		},
+		HttpMethod: http.MethodPut,
+		OptionsObject: breakLeaseOptions{
+			input: input,
+		},
+		Path: fmt.Sprintf("/%s/%s", containerName, blobName),
+	}
+
+	req, err := c.Client.NewRequest(ctx, opts)
+	if err != nil {
+		err = fmt.Errorf("building request: %+v", err)
+		return
+	}
+
+	resp.HttpResponse, err = req.Execute(ctx)
+	if err != nil {
+		err = fmt.Errorf("executing request: %+v", err)
+		return
+	}
+
+	if resp.HttpResponse != nil {
+		if resp.HttpResponse.Header != nil {
+			// Populate LeaseTime from the `x-ms-lease-time` response header — the approximate
+			// seconds remaining on the lease (0 when the break is immediate). Best-effort:
+			// an absent or unparseable header leaves the zero value rather than failing the call.
+			if v := resp.HttpResponse.Header.Get("x-ms-lease-time"); v != "" {
+				if i, innerErr := strconv.Atoi(v); innerErr == nil {
+					resp.LeaseTime = i
+				}
+			}
+		}
+	}
+
+	return
+}
+
+type breakLeaseOptions struct {
+	input BreakLeaseInput
+}
+
+func (b breakLeaseOptions) ToHeaders() *client.Headers {
+	headers := &client.Headers{}
+
+	headers.Append("x-ms-lease-action", "break")
+	headers.Append("x-ms-lease-id", b.input.LeaseID)
+
+	if b.input.BreakPeriod != nil {
+		headers.Append("x-ms-lease-break-period", strconv.Itoa(*b.input.BreakPeriod))
+	}
+
+	return headers
+}
+
+func (b breakLeaseOptions) ToOData() *odata.Query {
+	return nil
+}
+
+func (b breakLeaseOptions) ToQuery() *client.QueryParams {
+	out := &client.QueryParams{}
+	out.Append("comp", "lease")
+	return out
+}
diff --git a/storage/2023-11-03/blob/blobs/lease_change.go b/storage/2023-11-03/blob/blobs/lease_change.go
new file mode 100644
index 0000000..c69d054
--- /dev/null
+++ b/storage/2023-11-03/blob/blobs/lease_change.go
@@ -0,0 +1,99 @@
+package blobs
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/hashicorp/go-azure-sdk/sdk/client"
+	"github.com/hashicorp/go-azure-sdk/sdk/odata"
+)
+
+type ChangeLeaseInput struct {
+	ExistingLeaseID string
+	ProposedLeaseID string
+}
+
+type ChangeLeaseResponse struct {
+	HttpResponse *client.Response
+
+	LeaseID string
+}
+
+// ChangeLease changes an existing lock on a blob for another lock.
+func (c Client) ChangeLease(ctx context.Context, containerName, blobName string, input ChangeLeaseInput) (resp ChangeLeaseResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if input.ExistingLeaseID == "" { + return resp, fmt.Errorf("`input.ExistingLeaseID` cannot be an empty string") + } + + if input.ProposedLeaseID == "" { + return resp, fmt.Errorf("`input.ProposedLeaseID` cannot be an empty string") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPut, + OptionsObject: changeLeaseOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Header != nil { + resp.LeaseID = resp.HttpResponse.Header.Get("x-ms-lease-id") + } + } + + return +} + +type changeLeaseOptions struct { + input ChangeLeaseInput +} + +func (c changeLeaseOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("x-ms-lease-action", "change") + headers.Append("x-ms-lease-id", c.input.ExistingLeaseID) + headers.Append("x-ms-proposed-lease-id", c.input.ProposedLeaseID) + return headers +} + +func (c changeLeaseOptions) ToOData() *odata.Query { + return nil +} + +func (c changeLeaseOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "lease") + return out +} diff --git a/storage/2023-11-03/blob/blobs/lease_release.go 
b/storage/2023-11-03/blob/blobs/lease_release.go new file mode 100644 index 0000000..350493d --- /dev/null +++ b/storage/2023-11-03/blob/blobs/lease_release.go @@ -0,0 +1,85 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type ReleaseLeaseResponse struct { + HttpResponse *client.Response +} + +type ReleaseLeaseInput struct { + LeaseID string +} + +// ReleaseLease releases a lock based on the Lease ID. +func (c Client) ReleaseLease(ctx context.Context, containerName, blobName string, input ReleaseLeaseInput) (resp ReleaseLeaseResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if input.LeaseID == "" { + return resp, fmt.Errorf("`input.LeaseID` cannot be an empty string") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPut, + OptionsObject: releaseLeaseOptions{ + leaseID: input.LeaseID, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type releaseLeaseOptions struct { + leaseID string +} + +func (r releaseLeaseOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("x-ms-lease-action", "release") + headers.Append("x-ms-lease-id", r.leaseID) + return headers +} + +func (r releaseLeaseOptions) ToOData() *odata.Query { + return nil +} + +func (r 
releaseLeaseOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "lease") + return out +} diff --git a/storage/2023-11-03/blob/blobs/lease_renew.go b/storage/2023-11-03/blob/blobs/lease_renew.go new file mode 100644 index 0000000..2a82a0a --- /dev/null +++ b/storage/2023-11-03/blob/blobs/lease_renew.go @@ -0,0 +1,84 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type RenewLeaseResponse struct { + HttpResponse *client.Response +} + +type RenewLeaseInput struct { + LeaseID string +} + +func (c Client) RenewLease(ctx context.Context, containerName, blobName string, input RenewLeaseInput) (resp RenewLeaseResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if input.LeaseID == "" { + return resp, fmt.Errorf("`input.LeaseID` cannot be an empty string") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPut, + OptionsObject: renewLeaseOptions{ + leaseID: input.LeaseID, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type renewLeaseOptions struct { + leaseID string +} + +func (r renewLeaseOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("x-ms-lease-action", "renew") + headers.Append("x-ms-lease-id", r.leaseID) + return 
headers +} + +func (r renewLeaseOptions) ToOData() *odata.Query { + return nil +} + +func (r renewLeaseOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "lease") + return out +} diff --git a/storage/2023-11-03/blob/blobs/lease_test.go b/storage/2023-11-03/blob/blobs/lease_test.go new file mode 100644 index 0000000..de04760 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/lease_test.go @@ -0,0 +1,124 @@ +package blobs + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/2020-08-04/blob/containers" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +func TestLeaseLifecycle(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + containerName := fmt.Sprintf("cont-%d", testhelpers.RandomInt()) + fileName := "ubuntu.iso" + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindBlobStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + + containersClient, err := containers.NewWithBaseUri(fmt.Sprintf("https://%s.blob.%s", testData.StorageAccountName, *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(containersClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", 
err) + } + + _, err = containersClient.Create(ctx, containerName, containers.CreateInput{}) + if err != nil { + t.Fatal(fmt.Errorf("error creating: %s", err)) + } + defer containersClient.Delete(ctx, containerName) + + blobClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.blob.%s", testData.StorageAccountName, *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(blobClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + t.Logf("[DEBUG] Copying file to Blob Storage..") + copyInput := CopyInput{ + CopySource: "http://releases.ubuntu.com/14.04/ubuntu-14.04.6-desktop-amd64.iso", + } + + refreshInterval := 5 * time.Second + if err := blobClient.CopyAndWait(ctx, containerName, fileName, copyInput, refreshInterval); err != nil { + t.Fatalf("Error copying: %s", err) + } + defer blobClient.Delete(ctx, containerName, fileName, DeleteInput{}) + + // Test begins here + t.Logf("[DEBUG] Acquiring Lease..") + leaseInput := AcquireLeaseInput{ + LeaseDuration: -1, + } + leaseInfo, err := blobClient.AcquireLease(ctx, containerName, fileName, leaseInput) + if err != nil { + t.Fatalf("Error acquiring lease: %s", err) + } + t.Logf("[DEBUG] Lease ID: %q", leaseInfo.LeaseID) + + t.Logf("[DEBUG] Changing Lease..") + changeLeaseInput := ChangeLeaseInput{ + ExistingLeaseID: leaseInfo.LeaseID, + ProposedLeaseID: "31f5bb01-cdd9-4166-bcdc-95186076bde0", + } + changeLeaseResult, err := blobClient.ChangeLease(ctx, containerName, fileName, changeLeaseInput) + if err != nil { + t.Fatalf("Error changing lease: %s", err) + } + t.Logf("[DEBUG] New Lease ID: %q", changeLeaseResult.LeaseID) + + t.Logf("[DEBUG] Releasing Lease..") + if _, err := blobClient.ReleaseLease(ctx, containerName, fileName, ReleaseLeaseInput{LeaseID: changeLeaseResult.LeaseID}); err != nil { + t.Fatalf("Error releasing lease: %s", err) + } + + t.Logf("[DEBUG] Acquiring a new 
lease..") + leaseInput = AcquireLeaseInput{ + LeaseDuration: 30, + } + leaseInfo, err = blobClient.AcquireLease(ctx, containerName, fileName, leaseInput) + if err != nil { + t.Fatalf("Error acquiring lease: %s", err) + } + t.Logf("[DEBUG] Lease ID: %q", leaseInfo.LeaseID) + + t.Logf("[DEBUG] Renewing lease..") + if _, err := blobClient.RenewLease(ctx, containerName, fileName, RenewLeaseInput{LeaseID: leaseInfo.LeaseID}); err != nil { + t.Fatalf("Error renewing lease: %s", err) + } + + t.Logf("[DEBUG] Breaking lease..") + breakLeaseInput := BreakLeaseInput{ + LeaseID: leaseInfo.LeaseID, + } + if _, err := blobClient.BreakLease(ctx, containerName, fileName, breakLeaseInput); err != nil { + t.Fatalf("Error breaking lease: %s", err) + } +} diff --git a/storage/2023-11-03/blob/blobs/lifecycle_test.go b/storage/2023-11-03/blob/blobs/lifecycle_test.go new file mode 100644 index 0000000..a6c26fd --- /dev/null +++ b/storage/2023-11-03/blob/blobs/lifecycle_test.go @@ -0,0 +1,180 @@ +package blobs + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/2020-08-04/blob/containers" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +var _ StorageBlob = Client{} + +func TestLifecycle(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + containerName := fmt.Sprintf("cont-%d", testhelpers.RandomInt()) + fileName := "example.txt" + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindStorageVTwo) + if err != nil { + t.Fatal(err) + } + defer 
client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + + containersClient, err := containers.NewWithBaseUri(fmt.Sprintf("https://%s.blob.%s", testData.StorageAccountName, *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(containersClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + _, err = containersClient.Create(ctx, containerName, containers.CreateInput{}) + if err != nil { + t.Fatal(fmt.Errorf("error creating: %s", err)) + } + defer containersClient.Delete(ctx, containerName) + + blobClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.blob.%s", testData.StorageAccountName, *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(blobClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + t.Logf("[DEBUG] Copying file to Blob Storage..") + copyInput := CopyInput{ + CopySource: "http://releases.ubuntu.com/14.04/ubuntu-14.04.6-desktop-amd64.iso", + } + + refreshInterval := 5 * time.Second + if err := blobClient.CopyAndWait(ctx, containerName, fileName, copyInput, refreshInterval); err != nil { + t.Fatalf("Error copying: %s", err) + } + + t.Logf("[DEBUG] Retrieving Blob Properties..") + details, err := blobClient.GetProperties(ctx, containerName, fileName, GetPropertiesInput{}) + if err != nil { + t.Fatalf("Error retrieving properties: %s", err) + } + + // default value + if details.AccessTier != Hot { + t.Fatalf("Expected the AccessTier to be %q but got %q", Hot, details.AccessTier) + } + if details.BlobType != BlockBlob { + t.Fatalf("Expected BlobType to be %q but got %q", BlockBlob, details.BlobType) + } + if 
len(details.MetaData) != 0 {
+		t.Fatalf("Expected there to be no items of metadata but got %d", len(details.MetaData))
+	}
+
+	t.Logf("[DEBUG] Checking it's returned in the List API..")
+	listInput := containers.ListBlobsInput{}
+	listResult, err := containersClient.ListBlobs(ctx, containerName, listInput)
+	if err != nil {
+		t.Fatalf("Error listing blobs: %s", err)
+	}
+
+	if model := listResult.Model; model != nil {
+		if len(model.Blobs.Blobs) != 1 {
+			t.Fatalf("Expected there to be 1 blob in the container but got %d", len(model.Blobs.Blobs))
+		}
+	}
+
+	t.Logf("[DEBUG] Setting MetaData..")
+	metaDataInput := SetMetaDataInput{
+		MetaData: map[string]string{
+			"hello": "there",
+		},
+	}
+	if _, err := blobClient.SetMetaData(ctx, containerName, fileName, metaDataInput); err != nil {
+		t.Fatalf("Error setting MetaData: %s", err)
+	}
+
+	t.Logf("[DEBUG] Re-retrieving Blob Properties..")
+	details, err = blobClient.GetProperties(ctx, containerName, fileName, GetPropertiesInput{})
+	if err != nil {
+		t.Fatalf("Error re-retrieving properties: %s", err)
+	}
+
+	// default value
+	if details.AccessTier != Hot {
+		t.Fatalf("Expected the AccessTier to be %q but got %q", Hot, details.AccessTier)
+	}
+	if details.BlobType != BlockBlob {
+		t.Fatalf("Expected BlobType to be %q but got %q", BlockBlob, details.BlobType)
+	}
+	if len(details.MetaData) != 1 {
+		t.Fatalf("Expected there to be 1 item of metadata but got %d", len(details.MetaData))
+	}
+	if details.MetaData["hello"] != "there" {
+		t.Fatalf("Expected `hello` to be `there` but got %q", details.MetaData["hello"])
+	}
+
+	t.Logf("[DEBUG] Retrieving the Block List..")
+	getBlockListInput := GetBlockListInput{
+		BlockListType: All,
+	}
+	blockList, err := blobClient.GetBlockList(ctx, containerName, fileName, getBlockListInput)
+	if err != nil {
+		t.Fatalf("Error retrieving Block List: %s", err)
+	}
+
+	// since this is a copy from an existing file, all blocks should be present
+	if len(blockList.CommittedBlocks.Blocks) == 0
{ + t.Fatalf("Expected there to be committed blocks but there weren't!") + } + if len(blockList.UncommittedBlocks.Blocks) != 0 { + t.Fatalf("Expected all blocks to be committed but got %d uncommitted blocks", len(blockList.UncommittedBlocks.Blocks)) + } + + t.Logf("[DEBUG] Changing the Access Tiers..") + tiers := []AccessTier{ + Hot, + Cool, + Archive, + } + for _, tier := range tiers { + t.Logf("[DEBUG] Updating the Access Tier to %q..", string(tier)) + if _, err := blobClient.SetTier(ctx, containerName, fileName, SetTierInput{Tier: tier}); err != nil { + t.Fatalf("Error setting the Access Tier: %s", err) + } + + t.Logf("[DEBUG] Re-retrieving Blob Properties..") + details, err = blobClient.GetProperties(ctx, containerName, fileName, GetPropertiesInput{}) + if err != nil { + t.Fatalf("Error re-retrieving properties: %s", err) + } + + if details.AccessTier != tier { + t.Fatalf("Expected the AccessTier to be %q but got %q", tier, details.AccessTier) + } + } + + t.Logf("[DEBUG] Deleting Blob") + if _, err := blobClient.Delete(ctx, containerName, fileName, DeleteInput{}); err != nil { + t.Fatalf("Error deleting Blob: %s", err) + } +} diff --git a/storage/2023-11-03/blob/blobs/metadata_set.go b/storage/2023-11-03/blob/blobs/metadata_set.go new file mode 100644 index 0000000..604e7a7 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/metadata_set.go @@ -0,0 +1,93 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type SetMetaDataInput struct { + // The ID of the Lease + // This must be specified if a Lease is present on the Blob, else a 403 is returned + LeaseID *string + + // Any metadata which should be added to this blob + MetaData map[string]string +} + +type SetMetaDataResponse struct { + HttpResponse *client.Response +} + +// SetMetaData marks the specified blob or snapshot 
for deletion. The blob is later deleted during garbage collection. +func (c Client) SetMetaData(ctx context.Context, containerName, blobName string, input SetMetaDataInput) (resp SetMetaDataResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if err := metadata.Validate(input.MetaData); err != nil { + return resp, fmt.Errorf(fmt.Sprintf("`input.MetaData` is not valid: %s.", err)) + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPut, + OptionsObject: setMetadataOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type setMetadataOptions struct { + input SetMetaDataInput +} + +func (s setMetadataOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + if s.input.LeaseID != nil { + headers.Append("x-ms-lease-id", *s.input.LeaseID) + } + headers.Merge(metadata.SetMetaDataHeaders(s.input.MetaData)) + return headers +} + +func (s setMetadataOptions) ToOData() *odata.Query { + return nil +} + +func (s setMetadataOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "metadata") + return out +} diff --git a/storage/2023-11-03/blob/blobs/models.go b/storage/2023-11-03/blob/blobs/models.go new file mode 100644 index 0000000..d7d83aa --- /dev/null +++ b/storage/2023-11-03/blob/blobs/models.go @@ -0,0 +1,82 @@ +package blobs + +type AccessTier string + 
+var ( + Archive AccessTier = "Archive" + Cool AccessTier = "Cool" + Hot AccessTier = "Hot" +) + +type ArchiveStatus string + +var ( + None ArchiveStatus = "" + RehydratePendingToCool ArchiveStatus = "rehydrate-pending-to-cool" + RehydratePendingToHot ArchiveStatus = "rehydrate-pending-to-hot" +) + +type BlockListType string + +var ( + All BlockListType = "all" + Committed BlockListType = "committed" + Uncommitted BlockListType = "uncommitted" +) + +type Block struct { + // The base64-encoded Block ID + Name string `xml:"Name"` + + // The size of the Block in Bytes + Size int64 `xml:"Size"` +} + +type BlobType string + +var ( + AppendBlob BlobType = "AppendBlob" + BlockBlob BlobType = "BlockBlob" + PageBlob BlobType = "PageBlob" +) + +type CommittedBlocks struct { + Blocks []Block `xml:"Block"` +} + +type CopyStatus string + +var ( + Aborted CopyStatus = "aborted" + Failed CopyStatus = "failed" + Pending CopyStatus = "pending" + Success CopyStatus = "success" +) + +type LeaseDuration string + +var ( + Fixed LeaseDuration = "fixed" + Infinite LeaseDuration = "infinite" +) + +type LeaseState string + +var ( + Available LeaseState = "available" + Breaking LeaseState = "breaking" + Broken LeaseState = "broken" + Expired LeaseState = "expired" + Leased LeaseState = "leased" +) + +type LeaseStatus string + +var ( + Locked LeaseStatus = "locked" + Unlocked LeaseStatus = "unlocked" +) + +type UncommittedBlocks struct { + Blocks []Block `xml:"Block"` +} diff --git a/storage/2023-11-03/blob/blobs/properties_get.go b/storage/2023-11-03/blob/blobs/properties_get.go new file mode 100644 index 0000000..86b7c11 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/properties_get.go @@ -0,0 +1,287 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type GetPropertiesInput struct { + 
// The ID of the Lease + // This must be specified if a Lease is present on the Blob, else a 403 is returned + LeaseID *string +} + +type GetPropertiesResponse struct { + HttpResponse *client.Response + + // The tier of page blob on a premium storage account or tier of block blob on blob storage or general purpose v2 account. + AccessTier AccessTier + + // This gives the last time tier was changed on the object. + // This header is returned only if tier on block blob was ever set. + // The date format follows RFC 1123 + AccessTierChangeTime string + + // For page blobs on a premium storage account only. + // If the access tier is not explicitly set on the blob, the tier is inferred based on its content length + // and this header will be returned with true value. + // For block blobs on Blob Storage or general purpose v2 account, if the blob does not have the access tier + // set then we infer the tier from the storage account properties. This header is set only if the block blob + // tier is inferred + AccessTierInferred bool + + // For blob storage or general purpose v2 account. + // If the blob is being rehydrated and is not complete then this header is returned indicating + // that rehydrate is pending and also tells the destination tier + ArchiveStatus ArchiveStatus + + // The number of committed blocks present in the blob. + // This header is returned only for append blobs. + BlobCommittedBlockCount string + + // The current sequence number for a page blob. + // This header is not returned for block blobs or append blobs. + // This header is not returned for block blobs. + BlobSequenceNumber string + + // The blob type. + BlobType BlobType + + // If the Cache-Control request header has previously been set for the blob, that value is returned in this header. + CacheControl string + + // The Content-Disposition response header field conveys additional information about how to process + // the response payload, and also can be used to attach additional metadata. 
+ // For example, if set to attachment, it indicates that the user-agent should not display the response, + // but instead show a Save As dialog. + ContentDisposition string + + // If the Content-Encoding request header has previously been set for the blob, + // that value is returned in this header. + ContentEncoding string + + // If the Content-Language request header has previously been set for the blob, + // that value is returned in this header. + ContentLanguage string + + // The size of the blob in bytes. + // For a page blob, this header returns the value of the x-ms-blob-content-length header stored with the blob. + ContentLength int64 + + // The content type specified for the blob. + // If no content type was specified, the default content type is `application/octet-stream`. + ContentType string + + // If the Content-MD5 header has been set for the blob, this response header is returned so that + // the client can check for message content integrity. + ContentMD5 string + + // Conclusion time of the last attempted Copy Blob operation where this blob was the destination blob. + // This value can specify the time of a completed, aborted, or failed copy attempt. + // This header does not appear if a copy is pending, if this blob has never been the + // destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob + // operation using Set Blob Properties, Put Blob, or Put Block List. + CopyCompletionTime string + + // Included if the blob is incremental copy blob or incremental copy snapshot, if x-ms-copy-status is success. + // Snapshot time of the last successful incremental copy snapshot for this blob + CopyDestinationSnapshot string + + // String identifier for the last attempted Copy Blob operation where this blob was the destination blob. 
+ // This header does not appear if this blob has never been the destination in a Copy Blob operation, + // or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, + // Put Blob, or Put Block List. + CopyID string + + // Contains the number of bytes copied and the total bytes in the source in the last attempted + // Copy Blob operation where this blob was the destination blob. + // Can show between 0 and Content-Length bytes copied. + // This header does not appear if this blob has never been the destination in a Copy Blob operation, + // or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, + // Put Blob, or Put Block List. + CopyProgress string + + // URL up to 2 KB in length that specifies the source blob used in the last attempted Copy Blob operation + // where this blob was the destination blob. + // This header does not appear if this blob has never been the destination in a Copy Blob operation, + // or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, + // Put Blob, or Put Block List + CopySource string + + // State of the copy operation identified by x-ms-copy-id, with these values: + // - success: Copy completed successfully. + // - pending: Copy is in progress. + // Check x-ms-copy-status-description if intermittent, non-fatal errors + // impede copy progress but don’t cause failure. + // - aborted: Copy was ended by Abort Copy Blob. + // - failed: Copy failed. See x-ms- copy-status-description for failure details. + // This header does not appear if this blob has never been the destination in a Copy Blob operation, + // or if this blob has been modified after a completed Copy Blob operation using Set Blob Properties, + // Put Blob, or Put Block List. + CopyStatus CopyStatus + + // Describes cause of fatal or non-fatal copy operation failure. 
+ // This header does not appear if this blob has never been the destination in a Copy Blob operation, + // or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, + // Put Blob, or Put Block List. + CopyStatusDescription string + + // The date/time at which the blob was created. The date format follows RFC 1123 + CreationTime string + + // The ETag contains a value that you can use to perform operations conditionally + ETag string + + // Included if the blob is incremental copy blob. + IncrementalCopy bool + + // The date/time that the blob was last modified. The date format follows RFC 1123. + LastModified string + + // When a blob is leased, specifies whether the lease is of infinite or fixed duration + LeaseDuration LeaseDuration + + // The lease state of the blob + LeaseState LeaseState + + LeaseStatus LeaseStatus + + // A set of name-value pairs that correspond to the user-defined metadata associated with this blob + MetaData map[string]string + + // Is the Storage Account encrypted using server-side encryption? 
This should always return true + ServerEncrypted bool +} + +// GetProperties returns all user-defined metadata, standard HTTP properties, and system properties for the blob +func (c Client) GetProperties(ctx context.Context, containerName, blobName string, input GetPropertiesInput) (resp GetPropertiesResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodHead, + OptionsObject: getPropertiesOptions{ + leaseID: input.LeaseID, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Header != nil { + resp.AccessTier = AccessTier(resp.HttpResponse.Header.Get("x-ms-access-tier")) + resp.AccessTierChangeTime = resp.HttpResponse.Header.Get("x-ms-access-tier-change-time") + resp.ArchiveStatus = ArchiveStatus(resp.HttpResponse.Header.Get("x-ms-archive-status")) + resp.BlobCommittedBlockCount = resp.HttpResponse.Header.Get("x-ms-blob-committed-block-count") + resp.BlobSequenceNumber = resp.HttpResponse.Header.Get("x-ms-blob-sequence-number") + resp.BlobType = BlobType(resp.HttpResponse.Header.Get("x-ms-blob-type")) + resp.CacheControl = resp.HttpResponse.Header.Get("Cache-Control") + resp.ContentDisposition = resp.HttpResponse.Header.Get("Content-Disposition") + resp.ContentEncoding = resp.HttpResponse.Header.Get("Content-Encoding") + resp.ContentLanguage = 
resp.HttpResponse.Header.Get("Content-Language") + resp.ContentMD5 = resp.HttpResponse.Header.Get("Content-MD5") + resp.ContentType = resp.HttpResponse.Header.Get("Content-Type") + resp.CopyCompletionTime = resp.HttpResponse.Header.Get("x-ms-copy-completion-time") + resp.CopyDestinationSnapshot = resp.HttpResponse.Header.Get("x-ms-copy-destination-snapshot") + resp.CopyID = resp.HttpResponse.Header.Get("x-ms-copy-id") + resp.CopyProgress = resp.HttpResponse.Header.Get("x-ms-copy-progress") + resp.CopySource = resp.HttpResponse.Header.Get("x-ms-copy-source") + resp.CopyStatus = CopyStatus(resp.HttpResponse.Header.Get("x-ms-copy-status")) + resp.CopyStatusDescription = resp.HttpResponse.Header.Get("x-ms-copy-status-description") + resp.CreationTime = resp.HttpResponse.Header.Get("x-ms-creation-time") + resp.ETag = resp.HttpResponse.Header.Get("Etag") + resp.LastModified = resp.HttpResponse.Header.Get("Last-Modified") + resp.LeaseDuration = LeaseDuration(resp.HttpResponse.Header.Get("x-ms-lease-duration")) + resp.LeaseState = LeaseState(resp.HttpResponse.Header.Get("x-ms-lease-state")) + resp.LeaseStatus = LeaseStatus(resp.HttpResponse.Header.Get("x-ms-lease-status")) + resp.MetaData = metadata.ParseFromHeaders(resp.HttpResponse.Header) + + if v := resp.HttpResponse.Header.Get("x-ms-access-tier-inferred"); v != "" { + b, innerErr := strconv.ParseBool(v) + if innerErr != nil { + err = fmt.Errorf("error parsing %q as a bool: %s", v, innerErr) + return + } + resp.AccessTierInferred = b + } + + if v := resp.HttpResponse.Header.Get("Content-Length"); v != "" { + i, innerErr := strconv.Atoi(v) + if innerErr != nil { + err = fmt.Errorf("error parsing %q as an integer: %s", v, innerErr) + } + resp.ContentLength = int64(i) + } + + if v := resp.HttpResponse.Header.Get("x-ms-incremental-copy"); v != "" { + b, innerErr := strconv.ParseBool(v) + if innerErr != nil { + err = fmt.Errorf("error parsing %q as a bool: %s", v, innerErr) + return + } + resp.IncrementalCopy = b + } + + if 
v := resp.HttpResponse.Header.Get("x-ms-server-encrypted"); v != "" { + b, innerErr := strconv.ParseBool(v) + if innerErr != nil { + err = fmt.Errorf("error parsing %q as a bool: %s", v, innerErr) + return + } + resp.ServerEncrypted = b + } + } + } + + return +} + +type getPropertiesOptions struct { + leaseID *string +} + +func (g getPropertiesOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + + if g.leaseID != nil { + headers.Append("x-ms-lease-id", *g.leaseID) + } + return headers +} + +func (g getPropertiesOptions) ToOData() *odata.Query { + return nil +} + +func (g getPropertiesOptions) ToQuery() *client.QueryParams { + return nil +} diff --git a/storage/2023-11-03/blob/blobs/properties_set.go b/storage/2023-11-03/blob/blobs/properties_set.go new file mode 100644 index 0000000..ebe0985 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/properties_set.go @@ -0,0 +1,132 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type SetPropertiesInput struct { + CacheControl *string + ContentType *string + ContentMD5 *string + ContentEncoding *string + ContentLanguage *string + LeaseID *string + ContentDisposition *string + ContentLength *int64 + SequenceNumberAction *SequenceNumberAction + BlobSequenceNumber *string +} + +type SetPropertiesResponse struct { + HttpResponse *client.Response + + BlobSequenceNumber string + Etag string +} + +// SetProperties sets system properties on the blob. 
+func (c Client) SetProperties(ctx context.Context, containerName, blobName string, input SetPropertiesInput) (resp SetPropertiesResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPut, + OptionsObject: setPropertiesOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type SequenceNumberAction string + +var ( + Increment SequenceNumberAction = "increment" + Max SequenceNumberAction = "max" + Update SequenceNumberAction = "update" +) + +type setPropertiesOptions struct { + input SetPropertiesInput +} + +func (s setPropertiesOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + + if s.input.CacheControl != nil { + headers.Append("x-ms-blob-cache-control", *s.input.CacheControl) + } + if s.input.ContentDisposition != nil { + headers.Append("x-ms-blob-content-disposition", *s.input.ContentDisposition) + } + if s.input.ContentEncoding != nil { + headers.Append("x-ms-blob-content-encoding", *s.input.ContentEncoding) + } + if s.input.ContentLanguage != nil { + headers.Append("x-ms-blob-content-language", *s.input.ContentLanguage) + } + if s.input.ContentMD5 != nil { + headers.Append("x-ms-blob-content-md5", *s.input.ContentMD5) + } + if s.input.ContentType != nil { + headers.Append("x-ms-blob-content-type", *s.input.ContentType) + } + if 
s.input.ContentLength != nil { + headers.Append("x-ms-blob-content-length", strconv.Itoa(int(*s.input.ContentLength))) + } + if s.input.LeaseID != nil { + headers.Append("x-ms-lease-id", *s.input.LeaseID) + } + if s.input.SequenceNumberAction != nil { + headers.Append("x-ms-sequence-number-action", string(*s.input.SequenceNumberAction)) + } + if s.input.BlobSequenceNumber != nil { + headers.Append("x-ms-blob-sequence-number", *s.input.BlobSequenceNumber) + } + + return headers +} + +func (s setPropertiesOptions) ToOData() *odata.Query { + return nil +} + +func (s setPropertiesOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "properties") + return out +} diff --git a/storage/2023-11-03/blob/blobs/put_append_blob.go b/storage/2023-11-03/blob/blobs/put_append_blob.go new file mode 100644 index 0000000..0f3aa30 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/put_append_blob.go @@ -0,0 +1,117 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type PutAppendBlobInput struct { + CacheControl *string + ContentDisposition *string + ContentEncoding *string + ContentLanguage *string + ContentMD5 *string + ContentType *string + LeaseID *string + MetaData map[string]string +} + +type PutAppendBlobResponse struct { + HttpResponse *client.Response +} + +// PutAppendBlob is a wrapper around the Put API call (with a stricter input object) +// which creates a new append blob, or updates the content of an existing blob. 
+func (c Client) PutAppendBlob(ctx context.Context, containerName, blobName string, input PutAppendBlobInput) (resp PutAppendBlobResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if err := metadata.Validate(input.MetaData); err != nil { + return resp, fmt.Errorf(fmt.Sprintf("`input.MetaData` is not valid: %s.", err)) + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: putAppendBlobOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type putAppendBlobOptions struct { + input PutAppendBlobInput +} + +func (p putAppendBlobOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + + headers.Append("x-ms-blob-type", string(AppendBlob)) + headers.Append("Content-Length", "0") + + if p.input.CacheControl != nil { + headers.Append("x-ms-blob-cache-control", *p.input.CacheControl) + } + if p.input.ContentDisposition != nil { + headers.Append("x-ms-blob-content-disposition", *p.input.ContentDisposition) + } + if p.input.ContentEncoding != nil { + headers.Append("x-ms-blob-content-encoding", *p.input.ContentEncoding) + } + if p.input.ContentLanguage != nil { + headers.Append("x-ms-blob-content-language", *p.input.ContentLanguage) + } + if p.input.ContentMD5 != nil { + headers.Append("x-ms-blob-content-md5", *p.input.ContentMD5) + } + if p.input.ContentType != nil { + 
headers.Append("x-ms-blob-content-type", *p.input.ContentType) + } + if p.input.LeaseID != nil { + headers.Append("x-ms-lease-id", *p.input.LeaseID) + } + + headers.Merge(metadata.SetMetaDataHeaders(p.input.MetaData)) + return headers +} + +func (p putAppendBlobOptions) ToOData() *odata.Query { + return nil +} + +func (p putAppendBlobOptions) ToQuery() *client.QueryParams { + return nil +} diff --git a/storage/2023-11-03/blob/blobs/put_block.go b/storage/2023-11-03/blob/blobs/put_block.go new file mode 100644 index 0000000..a6059c4 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/put_block.go @@ -0,0 +1,108 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type PutBlockInput struct { + BlockID string + Content []byte + ContentMD5 *string + LeaseID *string +} + +type PutBlockResponse struct { + HttpResponse *client.Response + + ContentMD5 string +} + +// PutBlock creates a new block to be committed as part of a blob. 
+func (c Client) PutBlock(ctx context.Context, containerName, blobName string, input PutBlockInput) (resp PutBlockResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if input.BlockID == "" { + return resp, fmt.Errorf("`input.BlockID` cannot be an empty string") + } + + if len(input.Content) == 0 { + return resp, fmt.Errorf("`input.Content` cannot be empty") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: putBlockOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + err = req.Marshal(&input.Content) + if err != nil { + return resp, fmt.Errorf("marshalling request: %v", err) + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type putBlockOptions struct { + input PutBlockInput +} + +func (p putBlockOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("Content-Length", strconv.Itoa(len(p.input.Content))) + + if p.input.ContentMD5 != nil { + headers.Append("x-ms-blob-content-md5", *p.input.ContentMD5) + } + if p.input.LeaseID != nil { + headers.Append("x-ms-lease-id", *p.input.LeaseID) + } + + return headers +} + +func (p putBlockOptions) ToOData() *odata.Query { + return nil +} + +func (p putBlockOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "block") + out.Append("blockid", p.input.BlockID) + return out +} diff --git 
a/storage/2023-11-03/blob/blobs/put_block_blob.go b/storage/2023-11-03/blob/blobs/put_block_blob.go new file mode 100644 index 0000000..462ea17 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/put_block_blob.go @@ -0,0 +1,130 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type PutBlockBlobInput struct { + CacheControl *string + Content *[]byte + ContentDisposition *string + ContentEncoding *string + ContentLanguage *string + ContentMD5 *string + ContentType *string + LeaseID *string + MetaData map[string]string +} + +type PutBlockBlobResponse struct { + HttpResponse *client.Response +} + +// PutBlockBlob is a wrapper around the Put API call (with a stricter input object) +// which creates a new block append blob, or updates the content of an existing block blob. +func (c Client) PutBlockBlob(ctx context.Context, containerName, blobName string, input PutBlockBlobInput) (resp PutBlockBlobResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if input.Content != nil && len(*input.Content) == 0 { + return resp, fmt.Errorf("`input.Content` must either be nil or not empty") + } + + if err := metadata.Validate(input.MetaData); err != nil { + return resp, fmt.Errorf(fmt.Sprintf("`input.MetaData` is not valid: %s.", err)) + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: putBlockBlobOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + 
req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + err = req.Marshal(&input.Content) + if err != nil { + return resp, fmt.Errorf("marshalling request: %v", err) + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type putBlockBlobOptions struct { + input PutBlockBlobInput +} + +func (p putBlockBlobOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("x-ms-blob-type", string(BlockBlob)) + + if p.input.CacheControl != nil { + headers.Append("x-ms-blob-cache-control", *p.input.CacheControl) + } + if p.input.ContentDisposition != nil { + headers.Append("x-ms-blob-content-disposition", *p.input.ContentDisposition) + } + if p.input.ContentEncoding != nil { + headers.Append("x-ms-blob-content-encoding", *p.input.ContentEncoding) + } + if p.input.ContentLanguage != nil { + headers.Append("x-ms-blob-content-language", *p.input.ContentLanguage) + } + if p.input.ContentMD5 != nil { + headers.Append("x-ms-blob-content-md5", *p.input.ContentMD5) + } + if p.input.ContentType != nil { + headers.Append("x-ms-blob-content-type", *p.input.ContentType) + } + if p.input.LeaseID != nil { + headers.Append("x-ms-lease-id", *p.input.LeaseID) + } + if p.input.Content != nil { + headers.Append("Content-Length", strconv.Itoa(len(*p.input.Content))) + } + + headers.Merge(metadata.SetMetaDataHeaders(p.input.MetaData)) + + return headers +} + +func (p putBlockBlobOptions) ToOData() *odata.Query { + return nil +} + +func (p putBlockBlobOptions) ToQuery() *client.QueryParams { + return nil +} diff --git a/storage/2023-11-03/blob/blobs/put_block_blob_file.go b/storage/2023-11-03/blob/blobs/put_block_blob_file.go new file mode 100644 index 0000000..932fb32 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/put_block_blob_file.go @@ -0,0 +1,34 @@ +package blobs + +import ( + "context" + "fmt" + 
"io" + "os" +) + +// PutBlockBlobFromFile is a helper method which takes a file, and automatically chunks it up, rather than having to do this yourself +func (c Client) PutBlockBlobFromFile(ctx context.Context, containerName, blobName string, file *os.File, input PutBlockBlobInput) error { + fileInfo, err := file.Stat() + if err != nil { + return fmt.Errorf("error loading file info: %s", err) + } + + fileSize := fileInfo.Size() + bytes := make([]byte, fileSize) + + _, err = file.ReadAt(bytes, 0) + if err != nil { + if err != io.EOF { + return fmt.Errorf("Error reading bytes: %s", err) + } + } + + input.Content = &bytes + + if _, err = c.PutBlockBlob(ctx, containerName, blobName, input); err != nil { + return fmt.Errorf("error putting bytes: %s", err) + } + + return nil +} diff --git a/storage/2023-11-03/blob/blobs/put_block_list.go b/storage/2023-11-03/blob/blobs/put_block_list.go new file mode 100644 index 0000000..bfe451b --- /dev/null +++ b/storage/2023-11-03/blob/blobs/put_block_list.go @@ -0,0 +1,142 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type BlockList struct { + CommittedBlockIDs []BlockID `xml:"Committed,omitempty"` + UncommittedBlockIDs []BlockID `xml:"Uncommitted,omitempty"` + LatestBlockIDs []BlockID `xml:"Latest,omitempty"` +} + +type BlockID struct { + Value string `xml:",chardata"` +} + +type PutBlockListInput struct { + BlockList BlockList + CacheControl *string + ContentDisposition *string + ContentEncoding *string + ContentLanguage *string + ContentMD5 *string + ContentType *string + MetaData map[string]string + LeaseID *string +} + +type PutBlockListResponse struct { + HttpResponse *client.Response + + ContentMD5 string + ETag string + LastModified string +} + +// PutBlockList writes a blob by specifying the list of block IDs that make up the 
blob. +// In order to be written as part of a blob, a block must have been successfully written +// to the server in a prior Put Block operation. +func (c Client) PutBlockList(ctx context.Context, containerName, blobName string, input PutBlockListInput) (resp PutBlockListResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: putBlockListOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + err = req.Marshal(&input.BlockList) + if err != nil { + return resp, fmt.Errorf("marshalling request: %v", err) + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Header != nil { + resp.ContentMD5 = resp.HttpResponse.Header.Get("Content-MD5") + resp.ETag = resp.HttpResponse.Header.Get("ETag") + resp.LastModified = resp.HttpResponse.Header.Get("Last-Modified") + } + } + + return +} + +type putBlockListOptions struct { + input PutBlockListInput +} + +func (p putBlockListOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + + if p.input.CacheControl != nil { + headers.Append("x-ms-blob-cache-control", *p.input.CacheControl) + } + if p.input.ContentDisposition != nil { + headers.Append("x-ms-blob-content-disposition", *p.input.ContentDisposition) + } + if p.input.ContentEncoding != nil { + headers.Append("x-ms-blob-content-encoding", 
*p.input.ContentEncoding) + } + if p.input.ContentLanguage != nil { + headers.Append("x-ms-blob-content-language", *p.input.ContentLanguage) + } + if p.input.ContentMD5 != nil { + headers.Append("x-ms-blob-content-md5", *p.input.ContentMD5) + } + if p.input.ContentType != nil { + headers.Append("x-ms-blob-content-type", *p.input.ContentType) + } + if p.input.LeaseID != nil { + headers.Append("x-ms-lease-id", *p.input.LeaseID) + } + + headers.Merge(metadata.SetMetaDataHeaders(p.input.MetaData)) + + return headers +} + +func (p putBlockListOptions) ToOData() *odata.Query { + return nil +} + +func (p putBlockListOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "blocklist") + return out +} diff --git a/storage/2023-11-03/blob/blobs/put_block_url.go b/storage/2023-11-03/blob/blobs/put_block_url.go new file mode 100644 index 0000000..cd8487c --- /dev/null +++ b/storage/2023-11-03/blob/blobs/put_block_url.go @@ -0,0 +1,112 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type PutBlockFromURLInput struct { + BlockID string + CopySource string + + ContentMD5 *string + LeaseID *string + Range *string +} + +type PutBlockFromURLResponse struct { + HttpResponse *client.Response + ContentMD5 string +} + +// PutBlockFromURL creates a new block to be committed as part of a blob where the contents are read from a URL +func (c Client) PutBlockFromURL(ctx context.Context, containerName, blobName string, input PutBlockFromURLInput) (resp PutBlockFromURLResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if 
input.BlockID == "" { + return resp, fmt.Errorf("`input.BlockID` cannot be an empty string") + } + + if input.CopySource == "" { + return resp, fmt.Errorf("`input.CopySource` cannot be an empty string") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: putBlockUrlOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Header != nil { + resp.ContentMD5 = resp.HttpResponse.Header.Get("Content-MD5") + } + } + + return +} + +type putBlockUrlOptions struct { + input PutBlockFromURLInput +} + +func (p putBlockUrlOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + + headers.Append("x-ms-copy-source", p.input.CopySource) + + if p.input.ContentMD5 != nil { + headers.Append("x-ms-source-content-md5", *p.input.ContentMD5) + } + if p.input.LeaseID != nil { + headers.Append("x-ms-lease-id", *p.input.LeaseID) + } + if p.input.Range != nil { + headers.Append("x-ms-source-range", *p.input.Range) + } + return headers +} + +func (p putBlockUrlOptions) ToOData() *odata.Query { + return nil +} + +func (p putBlockUrlOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "block") + out.Append("blockid", p.input.BlockID) + return out +} diff --git a/storage/2023-11-03/blob/blobs/put_page_blob.go b/storage/2023-11-03/blob/blobs/put_page_blob.go new file mode 100644 index 0000000..2b733c4 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/put_page_blob.go @@ -0,0 +1,143 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + 
"github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type PutPageBlobInput struct { + CacheControl *string + ContentDisposition *string + ContentEncoding *string + ContentLanguage *string + ContentMD5 *string + ContentType *string + LeaseID *string + MetaData map[string]string + + BlobContentLengthBytes int64 + BlobSequenceNumber *int64 + AccessTier *AccessTier +} + +type PutPageBlobResponse struct { + HttpResponse *client.Response +} + +// PutPageBlob is a wrapper around the Put API call (with a stricter input object) +// which creates a new block blob, or updates the content of an existing page blob. +func (c Client) PutPageBlob(ctx context.Context, containerName, blobName string, input PutPageBlobInput) (resp PutPageBlobResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if input.BlobContentLengthBytes == 0 || input.BlobContentLengthBytes%512 != 0 { + return resp, fmt.Errorf("`input.BlobContentLengthBytes` must be aligned to a 512-byte boundary") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: putPageBlobOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type putPageBlobOptions struct { + input PutPageBlobInput +} + +func (p putPageBlobOptions) 
ToHeaders() *client.Headers { + headers := &client.Headers{} + + headers.Append("x-ms-blob-type", string(PageBlob)) + + // For a page blob or an page blob, the value of this header must be set to zero, + // as Put Blob is used only to initialize the blob + headers.Append("Content-Length", "0") + + // This header specifies the maximum size for the page blob, up to 8 TB. + // The page blob size must be aligned to a 512-byte boundary. + headers.Append("x-ms-blob-content-length", strconv.Itoa(int(p.input.BlobContentLengthBytes))) + + if p.input.AccessTier != nil { + headers.Append("x-ms-access-tier", string(*p.input.AccessTier)) + } + + if p.input.BlobSequenceNumber != nil { + headers.Append("x-ms-blob-sequence-number", strconv.Itoa(int(*p.input.BlobSequenceNumber))) + } + + if p.input.CacheControl != nil { + headers.Append("x-ms-blob-cache-control", *p.input.CacheControl) + } + + if p.input.ContentDisposition != nil { + headers.Append("x-ms-blob-content-disposition", *p.input.ContentDisposition) + } + + if p.input.ContentEncoding != nil { + headers.Append("x-ms-blob-content-encoding", *p.input.ContentEncoding) + } + + if p.input.ContentLanguage != nil { + headers.Append("x-ms-blob-content-language", *p.input.ContentLanguage) + } + + if p.input.ContentMD5 != nil { + headers.Append("x-ms-blob-content-md5", *p.input.ContentMD5) + } + + if p.input.ContentType != nil { + headers.Append("x-ms-blob-content-type", *p.input.ContentType) + } + + if p.input.LeaseID != nil { + headers.Append("x-ms-lease-id", *p.input.LeaseID) + } + + headers.Merge(metadata.SetMetaDataHeaders(p.input.MetaData)) + return headers +} + +func (p putPageBlobOptions) ToOData() *odata.Query { + return nil +} + +func (p putPageBlobOptions) ToQuery() *client.QueryParams { + return nil +} diff --git a/storage/2023-11-03/blob/blobs/put_page_clear.go b/storage/2023-11-03/blob/blobs/put_page_clear.go new file mode 100644 index 0000000..1a99513 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/put_page_clear.go 
@@ -0,0 +1,97 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type PutPageClearInput struct { + StartByte int64 + EndByte int64 + + LeaseID *string +} + +type PutPageClearResponse struct { + HttpResponse *client.Response +} + +// PutPageClear clears a range of pages within a page blob. +func (c Client) PutPageClear(ctx context.Context, containerName, blobName string, input PutPageClearInput) (resp PutPageClearResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if input.StartByte < 0 { + return resp, fmt.Errorf("`input.StartByte` must be greater than or equal to 0") + } + + if input.EndByte <= 0 { + return resp, fmt.Errorf("`input.EndByte` must be greater than 0") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: putPageClearOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type putPageClearOptions struct { + input PutPageClearInput +} + +func (p putPageClearOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + + headers.Append("x-ms-page-write", "clear") + headers.Append("x-ms-range", fmt.Sprintf("bytes=%d-%d", p.input.StartByte, p.input.EndByte)) + + if p.input.LeaseID != nil { + headers.Append("x-ms-lease-id", 
*p.input.LeaseID) + } + return headers +} + +func (p putPageClearOptions) ToOData() *odata.Query { + return nil +} + +func (p putPageClearOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "page") + return out +} diff --git a/storage/2023-11-03/blob/blobs/put_page_update.go b/storage/2023-11-03/blob/blobs/put_page_update.go new file mode 100644 index 0000000..448fe34 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/put_page_update.go @@ -0,0 +1,156 @@ +package blobs + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type PutPageUpdateInput struct { + StartByte int64 + EndByte int64 + Content []byte + + IfSequenceNumberEQ *string + IfSequenceNumberLE *string + IfSequenceNumberLT *string + IfModifiedSince *string + IfUnmodifiedSince *string + IfMatch *string + IfNoneMatch *string + LeaseID *string +} + +type PutPageUpdateResponse struct { + HttpResponse *client.Response + + BlobSequenceNumber string + ContentMD5 string + LastModified string +} + +// PutPageUpdate writes a range of pages to a page blob. 
+func (c Client) PutPageUpdate(ctx context.Context, containerName, blobName string, input PutPageUpdateInput) (resp PutPageUpdateResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if input.StartByte < 0 { + return resp, fmt.Errorf("`input.StartByte` must be greater than or equal to 0") + } + + if input.EndByte <= 0 { + return resp, fmt.Errorf("`input.EndByte` must be greater than 0") + } + + expectedSize := (input.EndByte - input.StartByte) + 1 + actualSize := int64(len(input.Content)) + if expectedSize != actualSize { + return resp, fmt.Errorf(fmt.Sprintf("Content Size was defined as %d but got %d.", expectedSize, actualSize)) + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: putPageUpdateOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + req.Body = io.NopCloser(bytes.NewReader(input.Content)) + req.ContentLength = int64(len(input.Content)) + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil && resp.HttpResponse.Header != nil { + resp.BlobSequenceNumber = resp.HttpResponse.Header.Get("x-ms-blob-sequence-number") + resp.ContentMD5 = resp.HttpResponse.Header.Get("Content-MD5") + resp.LastModified = resp.HttpResponse.Header.Get("Last-Modified") + } + + return +} + +type putPageUpdateOptions struct { + input PutPageUpdateInput +} + +func (p putPageUpdateOptions) ToHeaders() *client.Headers { + headers 
:= &client.Headers{} + headers.Append("x-ms-page-write", "update") + headers.Append("x-ms-range", fmt.Sprintf("bytes=%d-%d", p.input.StartByte, p.input.EndByte)) + headers.Append("Content-Length", strconv.Itoa(len(p.input.Content))) + + if p.input.LeaseID != nil { + headers.Append("x-ms-lease-id", *p.input.LeaseID) + } + + if p.input.IfSequenceNumberEQ != nil { + headers.Append("x-ms-if-sequence-number-eq", *p.input.IfSequenceNumberEQ) + } + + if p.input.IfSequenceNumberLE != nil { + headers.Append("x-ms-if-sequence-number-le", *p.input.IfSequenceNumberLE) + } + + if p.input.IfSequenceNumberLT != nil { + headers.Append("x-ms-if-sequence-number-lt", *p.input.IfSequenceNumberLT) + } + + if p.input.IfModifiedSince != nil { + headers.Append("If-Modified-Since", *p.input.IfModifiedSince) + } + + if p.input.IfUnmodifiedSince != nil { + headers.Append("If-Unmodified-Since", *p.input.IfUnmodifiedSince) + } + + if p.input.IfMatch != nil { + headers.Append("If-Match", *p.input.IfMatch) + } + + if p.input.IfNoneMatch != nil { + headers.Append("If-None-Match", *p.input.IfNoneMatch) + } + + return headers +} + +func (p putPageUpdateOptions) ToOData() *odata.Query { + return nil +} + +func (p putPageUpdateOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "page") + return out +} diff --git a/storage/2023-11-03/blob/blobs/set_tier.go b/storage/2023-11-03/blob/blobs/set_tier.go new file mode 100644 index 0000000..35cca5b --- /dev/null +++ b/storage/2023-11-03/blob/blobs/set_tier.go @@ -0,0 +1,81 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type SetTierInput struct { + Tier AccessTier +} + +type SetTierResponse struct { + HttpResponse *client.Response +} + +// SetTier sets the tier on a blob. 
+func (c Client) SetTier(ctx context.Context, containerName, blobName string, input SetTierInput) (resp SetTierResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusOK, + http.StatusAccepted, + }, + HttpMethod: http.MethodPut, + OptionsObject: setTierOptions{ + tier: input.Tier, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type setTierOptions struct { + tier AccessTier +} + +func (s setTierOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("x-ms-access-tier", string(s.tier)) + return headers +} + +func (s setTierOptions) ToOData() *odata.Query { + return nil +} + +func (s setTierOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "tier") + return out +} diff --git a/storage/2023-11-03/blob/blobs/snapshot.go b/storage/2023-11-03/blob/blobs/snapshot.go new file mode 100644 index 0000000..8a90dcd --- /dev/null +++ b/storage/2023-11-03/blob/blobs/snapshot.go @@ -0,0 +1,144 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type SnapshotInput struct { + // The ID of the Lease + // This must be specified if a Lease is present on the Blob, else a 
403 is returned + LeaseID *string + + // MetaData is a user-defined name-value pair associated with the blob. + // If no name-value pairs are specified, the operation will copy the base blob metadata to the snapshot. + // If one or more name-value pairs are specified, the snapshot is created with the specified metadata, + // and metadata is not copied from the base blob. + MetaData map[string]string + + // A DateTime value which will only snapshot the blob if it has been modified since the specified date/time + // If the base blob has not been modified, the Blob service returns status code 412 (Precondition Failed). + IfModifiedSince *string + + // A DateTime value which will only snapshot the blob if it has not been modified since the specified date/time + // If the base blob has been modified, the Blob service returns status code 412 (Precondition Failed). + IfUnmodifiedSince *string + + // An ETag value to snapshot the blob only if its ETag value matches the value specified. + // If the values do not match, the Blob service returns status code 412 (Precondition Failed). + IfMatch *string + + // An ETag value for this conditional header to snapshot the blob only if its ETag value + // does not match the value specified. + // If the values are identical, the Blob service returns status code 412 (Precondition Failed). + IfNoneMatch *string +} + +type SnapshotResponse struct { + HttpResponse *client.Response + + // The ETag of the snapshot + ETag string + + // A DateTime value that uniquely identifies the snapshot. + // The value of this header indicates the snapshot version, + // and may be used in subsequent requests to access the snapshot. 
+ SnapshotDateTime string +} + +// Snapshot captures a Snapshot of a given Blob +func (c Client) Snapshot(ctx context.Context, containerName, blobName string, input SnapshotInput) (resp SnapshotResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if err := metadata.Validate(input.MetaData); err != nil { + return resp, fmt.Errorf(fmt.Sprintf("`input.MetaData` is not valid: %s.", err)) + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: snapshotOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil && resp.HttpResponse.Header != nil { + resp.ETag = resp.HttpResponse.Header.Get("ETag") + resp.SnapshotDateTime = resp.HttpResponse.Header.Get("x-ms-snapshot") + } + + return +} + +type snapshotOptions struct { + input SnapshotInput +} + +func (s snapshotOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + + if s.input.LeaseID != nil { + headers.Append("x-ms-lease-id", *s.input.LeaseID) + } + + if s.input.IfModifiedSince != nil { + headers.Append("If-Modified-Since", *s.input.IfModifiedSince) + } + + if s.input.IfUnmodifiedSince != nil { + headers.Append("If-Unmodified-Since", *s.input.IfUnmodifiedSince) + } + + if s.input.IfMatch != nil { + headers.Append("If-Match", *s.input.IfMatch) + } + + if s.input.IfNoneMatch != nil { + 
headers.Append("If-None-Match", *s.input.IfNoneMatch) + } + + headers.Merge(metadata.SetMetaDataHeaders(s.input.MetaData)) + return headers +} + +func (s snapshotOptions) ToOData() *odata.Query { + return nil +} + +func (s snapshotOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "snapshot") + return out +} diff --git a/storage/2023-11-03/blob/blobs/snapshot_get_properties.go b/storage/2023-11-03/blob/blobs/snapshot_get_properties.go new file mode 100644 index 0000000..57a02f3 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/snapshot_get_properties.go @@ -0,0 +1,160 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type GetSnapshotPropertiesInput struct { + // The ID of the Lease + // This must be specified if a Lease is present on the Blob, else a 403 is returned + LeaseID *string + + // The ID of the Snapshot which should be retrieved + SnapshotID string +} + +// GetSnapshotProperties returns all user-defined metadata, standard HTTP properties, and system properties for +// the specified snapshot of a blob +func (c Client) GetSnapshotProperties(ctx context.Context, containerName, blobName string, input GetSnapshotPropertiesInput) (resp GetPropertiesResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + if input.SnapshotID == "" { + return resp, fmt.Errorf("`input.SnapshotID` cannot be an empty string") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: 
http.MethodHead, + OptionsObject: snapshotGetPropertiesOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Header != nil { + resp.AccessTier = AccessTier(resp.HttpResponse.Header.Get("x-ms-access-tier")) + resp.AccessTierChangeTime = resp.HttpResponse.Header.Get("x-ms-access-tier-change-time") + resp.ArchiveStatus = ArchiveStatus(resp.HttpResponse.Header.Get("x-ms-archive-status")) + resp.BlobCommittedBlockCount = resp.HttpResponse.Header.Get("x-ms-blob-committed-block-count") + resp.BlobSequenceNumber = resp.HttpResponse.Header.Get("x-ms-blob-sequence-number") + resp.BlobType = BlobType(resp.HttpResponse.Header.Get("x-ms-blob-type")) + resp.CacheControl = resp.HttpResponse.Header.Get("Cache-Control") + resp.ContentDisposition = resp.HttpResponse.Header.Get("Content-Disposition") + resp.ContentEncoding = resp.HttpResponse.Header.Get("Content-Encoding") + resp.ContentLanguage = resp.HttpResponse.Header.Get("Content-Language") + resp.ContentMD5 = resp.HttpResponse.Header.Get("Content-MD5") + resp.ContentType = resp.HttpResponse.Header.Get("Content-Type") + resp.CopyCompletionTime = resp.HttpResponse.Header.Get("x-ms-copy-completion-time") + resp.CopyDestinationSnapshot = resp.HttpResponse.Header.Get("x-ms-copy-destination-snapshot") + resp.CopyID = resp.HttpResponse.Header.Get("x-ms-copy-id") + resp.CopyProgress = resp.HttpResponse.Header.Get("x-ms-copy-progress") + resp.CopySource = resp.HttpResponse.Header.Get("x-ms-copy-source") + resp.CopyStatus = CopyStatus(resp.HttpResponse.Header.Get("x-ms-copy-status")) + resp.CopyStatusDescription = resp.HttpResponse.Header.Get("x-ms-copy-status-description") + resp.CreationTime = 
resp.HttpResponse.Header.Get("x-ms-creation-time") + resp.ETag = resp.HttpResponse.Header.Get("Etag") + resp.LastModified = resp.HttpResponse.Header.Get("Last-Modified") + resp.LeaseDuration = LeaseDuration(resp.HttpResponse.Header.Get("x-ms-lease-duration")) + resp.LeaseState = LeaseState(resp.HttpResponse.Header.Get("x-ms-lease-state")) + resp.LeaseStatus = LeaseStatus(resp.HttpResponse.Header.Get("x-ms-lease-status")) + resp.MetaData = metadata.ParseFromHeaders(resp.HttpResponse.Header) + + if v := resp.HttpResponse.Header.Get("x-ms-access-tier-inferred"); v != "" { + b, innerErr := strconv.ParseBool(v) + if innerErr != nil { + err = fmt.Errorf("error parsing %q as a bool: %s", v, innerErr) + return + } + + resp.AccessTierInferred = b + } + + if v := resp.HttpResponse.Header.Get("Content-Length"); v != "" { + i, innerErr := strconv.Atoi(v) + if innerErr != nil { + err = fmt.Errorf("error parsing %q as an integer: %s", v, innerErr) + } + + resp.ContentLength = int64(i) + } + + if v := resp.HttpResponse.Header.Get("x-ms-incremental-copy"); v != "" { + b, innerErr := strconv.ParseBool(v) + if innerErr != nil { + err = fmt.Errorf("error parsing %q as a bool: %s", v, innerErr) + return + } + + resp.IncrementalCopy = b + } + + if v := resp.HttpResponse.Header.Get("x-ms-server-encrypted"); v != "" { + b, innerErr := strconv.ParseBool(v) + if innerErr != nil { + err = fmt.Errorf("error parsing %q as a bool: %s", v, innerErr) + return + } + + resp.ServerEncrypted = b + } + } + } + + return +} + +type snapshotGetPropertiesOptions struct { + input GetSnapshotPropertiesInput +} + +func (s snapshotGetPropertiesOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + if s.input.LeaseID != nil { + headers.Append("x-ms-lease-id", *s.input.LeaseID) + } + return headers +} + +func (s snapshotGetPropertiesOptions) ToOData() *odata.Query { + return nil +} + +func (s snapshotGetPropertiesOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + 
out.Append("snapshot", s.input.SnapshotID) + return out +} diff --git a/storage/2023-11-03/blob/blobs/snapshot_test.go b/storage/2023-11-03/blob/blobs/snapshot_test.go new file mode 100644 index 0000000..0807c8a --- /dev/null +++ b/storage/2023-11-03/blob/blobs/snapshot_test.go @@ -0,0 +1,177 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/2020-08-04/blob/containers" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +func TestSnapshotLifecycle(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + containerName := fmt.Sprintf("cont-%d", testhelpers.RandomInt()) + fileName := "example.txt" + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindBlobStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + + containersClient, err := containers.NewWithBaseUri(fmt.Sprintf("https://%s.blob.%s", testData.StorageAccountName, *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(containersClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + _, err = containersClient.Create(ctx, containerName, containers.CreateInput{}) + if err != nil { + 
t.Fatal(fmt.Errorf("Error creating: %s", err)) + } + defer containersClient.Delete(ctx, containerName) + + blobClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.blob.%s", testData.StorageAccountName, *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(blobClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + t.Logf("[DEBUG] Copying file to Blob Storage..") + copyInput := CopyInput{ + CopySource: "http://releases.ubuntu.com/14.04/ubuntu-14.04.6-desktop-amd64.iso", + } + + refreshInterval := 5 * time.Second + if err := blobClient.CopyAndWait(ctx, containerName, fileName, copyInput, refreshInterval); err != nil { + t.Fatalf("Error copying: %s", err) + } + + t.Logf("[DEBUG] First Snapshot..") + firstSnapshot, err := blobClient.Snapshot(ctx, containerName, fileName, SnapshotInput{}) + if err != nil { + t.Fatalf("Error taking first snapshot: %s", err) + } + t.Logf("[DEBUG] First Snapshot ID: %q", firstSnapshot.SnapshotDateTime) + + t.Log("[DEBUG] Waiting 2 seconds..") + time.Sleep(2 * time.Second) + + t.Logf("[DEBUG] Second Snapshot..") + secondSnapshot, err := blobClient.Snapshot(ctx, containerName, fileName, SnapshotInput{ + MetaData: map[string]string{ + "hello": "world", + }, + }) + if err != nil { + t.Fatalf("Error taking Second snapshot: %s", err) + } + t.Logf("[DEBUG] Second Snapshot ID: %q", secondSnapshot.SnapshotDateTime) + + t.Logf("[DEBUG] Leasing the Blob..") + leaseDetails, err := blobClient.AcquireLease(ctx, containerName, fileName, AcquireLeaseInput{ + // infinite + LeaseDuration: -1, + }) + if err != nil { + t.Fatalf("Error leasing Blob: %s", err) + } + t.Logf("[DEBUG] Lease ID: %q", leaseDetails.LeaseID) + + t.Logf("[DEBUG] Third Snapshot..") + thirdSnapshot, err := blobClient.Snapshot(ctx, containerName, fileName, SnapshotInput{ + LeaseID: &leaseDetails.LeaseID, + }) + if err != nil { + t.Fatalf("Error 
taking Third snapshot: %s", err) + } + t.Logf("[DEBUG] Third Snapshot ID: %q", thirdSnapshot.SnapshotDateTime) + + t.Logf("[DEBUG] Releasing Lease..") + if _, err := blobClient.ReleaseLease(ctx, containerName, fileName, ReleaseLeaseInput{leaseDetails.LeaseID}); err != nil { + t.Fatalf("Error releasing Lease: %s", err) + } + + // get the properties from the blob, which should include the LastModifiedDate + t.Logf("[DEBUG] Retrieving Properties for Blob") + props, err := blobClient.GetProperties(ctx, containerName, fileName, GetPropertiesInput{}) + if err != nil { + t.Fatalf("Error getting properties: %s", err) + } + + // confirm that the If-Modified-None returns an error + t.Logf("[DEBUG] Third Snapshot..") + fourthSnapshot, err := blobClient.Snapshot(ctx, containerName, fileName, SnapshotInput{ + LeaseID: &leaseDetails.LeaseID, + IfModifiedSince: &props.LastModified, + }) + if err == nil { + t.Fatalf("Expected an error but didn't get one") + } + if fourthSnapshot.HttpResponse.StatusCode != http.StatusPreconditionFailed { + t.Fatalf("Expected the status code to be Precondition Failed but got: %d", fourthSnapshot.HttpResponse.StatusCode) + } + + t.Logf("[DEBUG] Retrieving the Second Snapshot Properties..") + getSecondSnapshotInput := GetSnapshotPropertiesInput{ + SnapshotID: secondSnapshot.SnapshotDateTime, + } + if _, err := blobClient.GetSnapshotProperties(ctx, containerName, fileName, getSecondSnapshotInput); err != nil { + t.Fatalf("Error retrieving properties for the second snapshot: %s", err) + } + + t.Logf("[DEBUG] Deleting the Second Snapshot..") + deleteSnapshotInput := DeleteSnapshotInput{ + SnapshotDateTime: secondSnapshot.SnapshotDateTime, + } + if _, err := blobClient.DeleteSnapshot(ctx, containerName, fileName, deleteSnapshotInput); err != nil { + t.Fatalf("Error deleting snapshot: %s", err) + } + + t.Logf("[DEBUG] Re-Retrieving the Second Snapshot Properties..") + secondSnapshotProps, err := blobClient.GetSnapshotProperties(ctx, containerName, 
fileName, getSecondSnapshotInput) + if err == nil { + t.Fatalf("Expected an error retrieving the snapshot but got none") + } + if secondSnapshotProps.HttpResponse.StatusCode != http.StatusNotFound { + t.Fatalf("Expected the status code to be %d but got %q", http.StatusNoContent, secondSnapshotProps.HttpResponse.StatusCode) + } + + t.Logf("[DEBUG] Deleting all the snapshots..") + if _, err := blobClient.DeleteSnapshots(ctx, containerName, fileName, DeleteSnapshotsInput{}); err != nil { + t.Fatalf("Error deleting snapshots: %s", err) + } + + t.Logf("[DEBUG] Deleting the Blob..") + deleteInput := DeleteInput{ + DeleteSnapshots: false, + } + if _, err := blobClient.Delete(ctx, containerName, fileName, deleteInput); err != nil { + t.Fatalf("Error deleting Blob: %s", err) + } +} diff --git a/storage/2023-11-03/blob/blobs/undelete.go b/storage/2023-11-03/blob/blobs/undelete.go new file mode 100644 index 0000000..e46277c --- /dev/null +++ b/storage/2023-11-03/blob/blobs/undelete.go @@ -0,0 +1,70 @@ +package blobs + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type UndeleteResponse struct { + HttpResponse *client.Response +} + +// Undelete restores the contents and metadata of soft deleted blob and any associated soft deleted snapshots. 
+func (c Client) Undelete(ctx context.Context, containerName, blobName string) (resp UndeleteResponse, err error) { + + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + if strings.ToLower(containerName) != containerName { + return resp, fmt.Errorf("`containerName` must be a lower-cased string") + } + + if blobName == "" { + return resp, fmt.Errorf("`blobName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPut, + OptionsObject: undeleteOptions{}, + Path: fmt.Sprintf("/%s/%s", containerName, blobName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type undeleteOptions struct{} + +func (u undeleteOptions) ToHeaders() *client.Headers { + return nil +} + +func (u undeleteOptions) ToOData() *odata.Query { + return nil +} + +func (u undeleteOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "undelete") + return out +} diff --git a/storage/2023-11-03/blob/blobs/version.go b/storage/2023-11-03/blob/blobs/version.go new file mode 100644 index 0000000..da934d3 --- /dev/null +++ b/storage/2023-11-03/blob/blobs/version.go @@ -0,0 +1,5 @@ +package blobs + +// APIVersion is the version of the API used for all Storage API Operations +const apiVersion = "2023-11-03" +const componentName = "blob/blobs" diff --git a/storage/2023-11-03/blob/containers/README.md b/storage/2023-11-03/blob/containers/README.md new file mode 100644 index 0000000..9d37548 --- /dev/null +++ b/storage/2023-11-03/blob/containers/README.md @@ -0,0 +1,45 @@ +## Blob Storage Container SDK for API version 2020-08-04 + +This package allows you to interact with the Containers Blob Storage API + +### 
Supported Authorizers

* Azure Active Directory (for the Resource Endpoint `https://storage.azure.com`)
* SharedKeyLite (Blob, File & Queue)

Note: when using the `ListBlobs` operation, only `SharedKeyLite` authentication is supported.

### Example Usage

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/go-autorest/autorest"
	"github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/containers"
)

func Example() error {
	accountName := "storageaccount1"
	storageAccountKey := "ABC123...."
	containerName := "mycontainer"

	storageAuth := autorest.NewSharedKeyLiteAuthorizer(accountName, storageAccountKey)
	containersClient := containers.New()
	containersClient.Client.Authorizer = storageAuth

	ctx := context.TODO()
	createInput := containers.CreateInput{
		AccessLevel: containers.Private,
	}
	if _, err := containersClient.Create(ctx, accountName, containerName, createInput); err != nil {
		return fmt.Errorf("Error creating Container: %s", err)
	}

	return nil
}
```
\ No newline at end of file
diff --git a/storage/2023-11-03/blob/containers/api.go b/storage/2023-11-03/blob/containers/api.go
new file mode 100644
index 0000000..0c6b65a
--- /dev/null
+++ b/storage/2023-11-03/blob/containers/api.go
@@ -0,0 +1,20 @@
package containers

import (
	"context"
)

// StorageContainer is the set of operations supported by the Containers client.
type StorageContainer interface {
	Create(ctx context.Context, containerName string, input CreateInput) (CreateResponse, error)
	Delete(ctx context.Context, containerName string) (DeleteResponse, error)
	GetProperties(ctx context.Context, containerName string, input GetPropertiesInput) (GetPropertiesResponse, error)
	AcquireLease(ctx context.Context, containerName string, input AcquireLeaseInput) (AcquireLeaseResponse, error)
	BreakLease(ctx context.Context, containerName string, input BreakLeaseInput) (BreakLeaseResponse, error)
	ChangeLease(ctx context.Context, containerName string, input ChangeLeaseInput) (ChangeLeaseResponse, error)
ReleaseLease(ctx context.Context, containerName string, input ReleaseLeaseInput) (ReleaseLeaseResponse, error) + RenewLease(ctx context.Context, containerName string, input RenewLeaseInput) (RenewLeaseResponse, error) + ListBlobs(ctx context.Context, containerName string, input ListBlobsInput) (ListBlobsResponse, error) + GetResourceManagerResourceID(subscriptionID, resourceGroup, accountName, containerName string) string + SetAccessControl(ctx context.Context, containerName string, input SetAccessControlInput) (SetAccessControlResponse, error) + SetMetaData(ctx context.Context, containerName string, metaData SetMetaDataInput) (SetMetaDataResponse, error) +} diff --git a/storage/2023-11-03/blob/containers/client.go b/storage/2023-11-03/blob/containers/client.go new file mode 100644 index 0000000..72174cb --- /dev/null +++ b/storage/2023-11-03/blob/containers/client.go @@ -0,0 +1,22 @@ +package containers + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/dataplane/storage" +) + +// Client is the base client for Blob Storage Containers. 
+type Client struct { + Client *storage.BaseClient +} + +func NewWithBaseUri(baseUri string) (*Client, error) { + baseClient, err := storage.NewBaseClient(baseUri, componentName, apiVersion) + if err != nil { + return nil, fmt.Errorf("building base client: %+v", err) + } + return &Client{ + Client: baseClient, + }, nil +} diff --git a/storage/2023-11-03/blob/containers/create.go b/storage/2023-11-03/blob/containers/create.go new file mode 100644 index 0000000..1acbae8 --- /dev/null +++ b/storage/2023-11-03/blob/containers/create.go @@ -0,0 +1,88 @@ +package containers + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type CreateInput struct { + // Specifies whether data in the container may be accessed publicly and the level of access + AccessLevel AccessLevel + + // A name-value pair to associate with the container as metadata. + MetaData map[string]string +} + +type CreateResponse struct { + HttpResponse *client.Response + Error *ErrorResponse `xml:"Error"` +} + +// Create creates a new container under the specified account. +// If the container with the same name already exists, the operation fails. 
+func (c Client) Create(ctx context.Context, containerName string, input CreateInput) (resp CreateResponse, err error) { + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + if err := metadata.Validate(input.MetaData); err != nil { + return resp, fmt.Errorf("`input.MetaData` is not valid: %+v", err) + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: createOptions{ + accessLevel: input.AccessLevel, + metaData: input.MetaData, + }, + Path: fmt.Sprintf("/%s", containerName), + } + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +var _ client.Options = createOptions{} + +type createOptions struct { + accessLevel AccessLevel + metaData map[string]string +} + +func (o createOptions) ToHeaders() *client.Headers { + headers := containerOptions{ + metaData: o.metaData, + }.ToHeaders() + + // If this header is not included in the request, container data is private to the account owner. 
+ if o.accessLevel != Private { + headers.Append("x-ms-blob-public-access", string(o.accessLevel)) + } + + return headers +} + +func (createOptions) ToOData() *odata.Query { + return nil +} + +func (createOptions) ToQuery() *client.QueryParams { + return containerOptions{}.ToQuery() +} diff --git a/storage/2023-11-03/blob/containers/delete.go b/storage/2023-11-03/blob/containers/delete.go new file mode 100644 index 0000000..0aea0ed --- /dev/null +++ b/storage/2023-11-03/blob/containers/delete.go @@ -0,0 +1,43 @@ +package containers + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" +) + +type DeleteResponse struct { + HttpResponse *client.Response +} + +// Delete marks the specified container for deletion. +// The container and any blobs contained within it are later deleted during garbage collection. +func (c Client) Delete(ctx context.Context, containerName string) (resp DeleteResponse, err error) { + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodDelete, + OptionsObject: containerOptions{}, + Path: fmt.Sprintf("/%s", containerName), + } + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} diff --git a/storage/2023-11-03/blob/containers/get_properties.go b/storage/2023-11-03/blob/containers/get_properties.go new file mode 100644 index 0000000..2655b0b --- /dev/null +++ b/storage/2023-11-03/blob/containers/get_properties.go @@ -0,0 +1,101 @@ +package containers + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + 
"github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type GetPropertiesInput struct { + LeaseId string +} + +type GetPropertiesResponse struct { + HttpResponse *client.Response + Model *ContainerProperties +} + +// GetProperties returns the properties for this Container without a Lease +func (c Client) GetProperties(ctx context.Context, containerName string, input GetPropertiesInput) (resp GetPropertiesResponse, err error) { + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: getPropertiesOptions{ + leaseId: input.LeaseId, + }, + Path: fmt.Sprintf("/%s", containerName), + } + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + resp.Model = &ContainerProperties{} + resp.Model.LeaseStatus = LeaseStatus(resp.HttpResponse.Header.Get("x-ms-lease-status")) + resp.Model.LeaseState = LeaseState(resp.HttpResponse.Header.Get("x-ms-lease-state")) + if resp.Model.LeaseStatus == Locked { + duration := LeaseDuration(resp.HttpResponse.Header.Get("x-ms-lease-duration")) + resp.Model.LeaseDuration = &duration + } + + // If this header is not returned in the response, the container is private to the account owner. 
+ accessLevel := resp.HttpResponse.Header.Get("x-ms-blob-public-access") + if accessLevel != "" { + resp.Model.AccessLevel = AccessLevel(accessLevel) + } else { + resp.Model.AccessLevel = Private + } + + // we can't necessarily use strconv.ParseBool here since this could be nil (only in some API versions) + resp.Model.HasImmutabilityPolicy = strings.EqualFold(resp.HttpResponse.Header.Get("x-ms-has-immutability-policy"), "true") + resp.Model.HasLegalHold = strings.EqualFold(resp.HttpResponse.Header.Get("x-ms-has-legal-hold"), "true") + resp.Model.MetaData = metadata.ParseFromHeaders(resp.HttpResponse.Header) + } + + return +} + +var _ client.Options = getPropertiesOptions{} + +type getPropertiesOptions struct { + leaseId string +} + +func (o getPropertiesOptions) ToHeaders() *client.Headers { + headers := containerOptions{}.ToHeaders() + + // If specified, Get Container Properties only succeeds if the container’s lease is active and matches this ID. + // If there is no active lease or the ID does not match, 412 (Precondition Failed) is returned. + if o.leaseId != "" { + headers.Append("x-ms-lease-id", o.leaseId) + } + + return headers +} + +func (getPropertiesOptions) ToOData() *odata.Query { + return nil +} + +func (getPropertiesOptions) ToQuery() *client.QueryParams { + return containerOptions{}.ToQuery() +} diff --git a/storage/2023-11-03/blob/containers/lease_acquire.go b/storage/2023-11-03/blob/containers/lease_acquire.go new file mode 100644 index 0000000..409f15a --- /dev/null +++ b/storage/2023-11-03/blob/containers/lease_acquire.go @@ -0,0 +1,99 @@ +package containers + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type AcquireLeaseInput struct { + // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. 
+ // A non-infinite lease can be between 15 and 60 seconds + LeaseDuration int + + ProposedLeaseID string +} + +type AcquireLeaseResponse struct { + HttpResponse *client.Response + Model *AcquireLeaseModel +} + +type AcquireLeaseModel struct { + LeaseID string +} + +// AcquireLease establishes and manages a lock on a container for delete operations. +func (c Client) AcquireLease(ctx context.Context, containerName string, input AcquireLeaseInput) (resp AcquireLeaseResponse, err error) { + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + // An infinite lease duration is -1 seconds. A non-infinite lease can be between 15 and 60 seconds + if input.LeaseDuration != -1 && (input.LeaseDuration <= 15 || input.LeaseDuration >= 60) { + return resp, fmt.Errorf("`input.LeaseDuration` must be -1 (infinite), or between 15 and 60 seconds") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: acquireLeaseOptions{ + leaseDuration: input.LeaseDuration, + proposedLeaseId: input.ProposedLeaseID, + }, + Path: fmt.Sprintf("/%s", containerName), + } + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + resp.Model = &AcquireLeaseModel{ + LeaseID: resp.HttpResponse.Header.Get("x-ms-lease-id"), + } + } + + return +} + +var _ client.Options = acquireLeaseOptions{} + +type acquireLeaseOptions struct { + leaseDuration int + proposedLeaseId string +} + +func (o acquireLeaseOptions) ToHeaders() *client.Headers { + headers := containerOptions{}.ToHeaders() + + headers.Append("x-ms-lease-action", "acquire") + headers.Append("x-ms-lease-duration", fmt.Sprintf("%d", o.leaseDuration)) 
+ + if o.proposedLeaseId != "" { + headers.Append("x-ms-proposed-lease-id", o.proposedLeaseId) + } + + return headers +} + +func (o acquireLeaseOptions) ToOData() *odata.Query { + return nil +} + +func (o acquireLeaseOptions) ToQuery() *client.QueryParams { + query := containerOptions{}.ToQuery() + query.Append("comp", "lease") + return query +} diff --git a/storage/2023-11-03/blob/containers/lease_break.go b/storage/2023-11-03/blob/containers/lease_break.go new file mode 100644 index 0000000..0ede9a8 --- /dev/null +++ b/storage/2023-11-03/blob/containers/lease_break.go @@ -0,0 +1,112 @@ +package containers + +import ( + "context" + "fmt" + "net/http" + "strconv" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type BreakLeaseInput struct { + // For a break operation, proposed duration the lease should continue + // before it is broken, in seconds, between 0 and 60. + // This break period is only used if it is shorter than the time remaining on the lease. + // If longer, the time remaining on the lease is used. + // A new lease will not be available before the break period has expired, + // but the lease may be held for longer than the break period. + // If this header does not appear with a break operation, a fixed-duration lease breaks + // after the remaining lease period elapses, and an infinite lease breaks immediately. + BreakPeriod *int + + LeaseID string +} + +type BreakLeaseResponse struct { + HttpResponse *client.Response + Model *BreakLeaseModel +} + +type BreakLeaseModel struct { + // Approximate time remaining in the lease period, in seconds. + // If the break is immediate, 0 is returned. 
+ LeaseTime int +} + +// BreakLease breaks a lock based on it's Lease ID +func (c Client) BreakLease(ctx context.Context, containerName string, input BreakLeaseInput) (resp BreakLeaseResponse, err error) { + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + if input.LeaseID == "" { + return resp, fmt.Errorf("`input.LeaseID` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodPut, + OptionsObject: breakLeaseOptions{ + breakPeriod: input.BreakPeriod, + leaseId: input.LeaseID, + }, + Path: fmt.Sprintf("/%s", containerName), + } + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + leaseRaw := resp.HttpResponse.Header.Get("x-ms-lease-time") + if leaseRaw != "" { + if i, err := strconv.Atoi(leaseRaw); err == nil { + resp.Model = &BreakLeaseModel{ + LeaseTime: i, + } + } + } + } + + return +} + +var _ client.Options = breakLeaseOptions{} + +type breakLeaseOptions struct { + breakPeriod *int + leaseId string +} + +func (o breakLeaseOptions) ToHeaders() *client.Headers { + headers := containerOptions{}.ToHeaders() + + headers.Append("x-ms-lease-action", "break") + headers.Append("x-ms-lease-id", o.leaseId) + + if o.breakPeriod != nil { + headers.Append("x-ms-lease-break-period", fmt.Sprintf("%d", *o.breakPeriod)) + } + + return headers +} + +func (o breakLeaseOptions) ToOData() *odata.Query { + return nil +} + +func (o breakLeaseOptions) ToQuery() *client.QueryParams { + query := containerOptions{}.ToQuery() + query.Append("comp", "lease") + return query +} diff --git a/storage/2023-11-03/blob/containers/lease_change.go 
b/storage/2023-11-03/blob/containers/lease_change.go new file mode 100644 index 0000000..6326507 --- /dev/null +++ b/storage/2023-11-03/blob/containers/lease_change.go @@ -0,0 +1,95 @@ +package containers + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type ChangeLeaseInput struct { + ExistingLeaseID string + ProposedLeaseID string +} + +type ChangeLeaseResponse struct { + HttpResponse *client.Response + Model *ChangeLeaseModel +} + +type ChangeLeaseModel struct { + LeaseID string +} + +// ChangeLease changes the lock from one Lease ID to another Lease ID +func (c Client) ChangeLease(ctx context.Context, containerName string, input ChangeLeaseInput) (resp ChangeLeaseResponse, err error) { + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + if input.ExistingLeaseID == "" { + return resp, fmt.Errorf("`input.ExistingLeaseID` cannot be an empty string") + } + if input.ProposedLeaseID == "" { + return resp, fmt.Errorf("`input.ProposedLeaseID` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPut, + OptionsObject: changeLeaseOptions{ + existingLeaseId: input.ExistingLeaseID, + proposedLeaseId: input.ProposedLeaseID, + }, + Path: fmt.Sprintf("/%s", containerName), + } + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + resp.Model = &ChangeLeaseModel{ + LeaseID: resp.HttpResponse.Header.Get("x-ms-lease-id"), + } + } + + return +} + +var _ client.Options = changeLeaseOptions{} + +type changeLeaseOptions struct { + existingLeaseId string + 
proposedLeaseId string +} + +func (o changeLeaseOptions) ToHeaders() *client.Headers { + headers := containerOptions{}.ToHeaders() + + headers.Append("x-ms-lease-action", "change") + headers.Append("x-ms-lease-id", o.existingLeaseId) + headers.Append("x-ms-proposed-lease-id", o.proposedLeaseId) + + return headers +} + +func (o changeLeaseOptions) ToOData() *odata.Query { + return nil +} + +func (o changeLeaseOptions) ToQuery() *client.QueryParams { + query := containerOptions{}.ToQuery() + query.Append("comp", "lease") + return query +} diff --git a/storage/2023-11-03/blob/containers/lease_release.go b/storage/2023-11-03/blob/containers/lease_release.go new file mode 100644 index 0000000..8a2cf41 --- /dev/null +++ b/storage/2023-11-03/blob/containers/lease_release.go @@ -0,0 +1,77 @@ +package containers + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type ReleaseLeaseInput struct { + LeaseId string +} + +type ReleaseLeaseResponse struct { + HttpResponse *client.Response +} + +// ReleaseLease releases the lock based on the Lease ID +func (c Client) ReleaseLease(ctx context.Context, containerName string, input ReleaseLeaseInput) (resp ReleaseLeaseResponse, err error) { + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + if input.LeaseId == "" { + return resp, fmt.Errorf("`input.LeaseId` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPut, + OptionsObject: releaseLeaseOptions{ + leaseId: input.LeaseId, + }, + Path: fmt.Sprintf("/%s", containerName), + } + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: 
%+v", err) + return + } + + return +} + +var _ client.Options = releaseLeaseOptions{} + +type releaseLeaseOptions struct { + leaseId string +} + +func (o releaseLeaseOptions) ToHeaders() *client.Headers { + headers := containerOptions{}.ToHeaders() + + headers.Append("x-ms-lease-action", "release") + headers.Append("x-ms-lease-id", o.leaseId) + + return headers +} + +func (o releaseLeaseOptions) ToOData() *odata.Query { + return nil +} + +func (o releaseLeaseOptions) ToQuery() *client.QueryParams { + query := containerOptions{}.ToQuery() + query.Append("comp", "lease") + return query +} diff --git a/storage/2023-11-03/blob/containers/lease_renew.go b/storage/2023-11-03/blob/containers/lease_renew.go new file mode 100644 index 0000000..eda2029 --- /dev/null +++ b/storage/2023-11-03/blob/containers/lease_renew.go @@ -0,0 +1,77 @@ +package containers + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type RenewLeaseInput struct { + LeaseId string +} + +type RenewLeaseResponse struct { + HttpResponse *client.Response +} + +// RenewLease renews the lock based on the Lease ID +func (c Client) RenewLease(ctx context.Context, containerName string, input RenewLeaseInput) (resp RenewLeaseResponse, err error) { + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + if input.LeaseId == "" { + return resp, fmt.Errorf("`input.LeaseId` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPut, + OptionsObject: renewLeaseOptions{ + leaseId: input.LeaseId, + }, + Path: fmt.Sprintf("/%s", containerName), + } + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = 
fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +var _ client.Options = renewLeaseOptions{} + +type renewLeaseOptions struct { + leaseId string +} + +func (o renewLeaseOptions) ToHeaders() *client.Headers { + headers := containerOptions{}.ToHeaders() + + headers.Append("x-ms-lease-action", "renew") + headers.Append("x-ms-lease-id", o.leaseId) + + return headers +} + +func (o renewLeaseOptions) ToOData() *odata.Query { + return nil +} + +func (o renewLeaseOptions) ToQuery() *client.QueryParams { + query := containerOptions{}.ToQuery() + query.Append("comp", "lease") + return query +} diff --git a/storage/2023-11-03/blob/containers/lifecycle_test.go b/storage/2023-11-03/blob/containers/lifecycle_test.go new file mode 100644 index 0000000..cc0b15a --- /dev/null +++ b/storage/2023-11-03/blob/containers/lifecycle_test.go @@ -0,0 +1,207 @@ +package containers + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +var _ StorageContainer = Client{} + +func TestContainerLifecycle(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + containerName := fmt.Sprintf("cont-%d", testhelpers.RandomInt()) + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindBlobStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + 
containersClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.blob.%s", testData.StorageAccountName, *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + if err := client.PrepareWithSharedKeyAuth(containersClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + // first let's test an empty container + input := CreateInput{} + _, err = containersClient.Create(ctx, containerName, input) + if err != nil { + t.Fatal(fmt.Errorf("Error creating: %s", err)) + } + + container, err := containersClient.GetProperties(ctx, containerName, GetPropertiesInput{}) + if err != nil { + t.Fatalf("retrieving container: %+v", err) + } + if container.Model == nil { + t.Fatalf("retrieving container: `model` was nil") + } + + if container.Model.AccessLevel != Private { + t.Fatalf("Expected Access Level to be Private but got %q", container.Model.AccessLevel) + } + if len(container.Model.MetaData) != 0 { + t.Fatalf("Expected MetaData to be empty but got: %s", container.Model.MetaData) + } + if container.Model.LeaseStatus != Unlocked { + t.Fatalf("Expected Container Lease to be Unlocked but was: %s", container.Model.LeaseStatus) + } + + // then update the metadata + _, err = containersClient.SetMetaData(ctx, containerName, SetMetaDataInput{ + MetaData: map[string]string{ + "dont": "kill-my-vibe", + }, + }) + if err != nil { + t.Fatal(fmt.Errorf("Error updating metadata: %s", err)) + } + + // give azure time to replicate + time.Sleep(2 * time.Second) + + // then assert that + container, err = containersClient.GetProperties(ctx, containerName, GetPropertiesInput{}) + if err != nil { + t.Fatal(fmt.Errorf("Error re-retrieving: %s", err)) + } + if len(container.Model.MetaData) != 1 { + t.Fatalf("Expected 1 item in the metadata but got: %s", container.Model.MetaData) + } + if container.Model.MetaData["dont"] != "kill-my-vibe" { + t.Fatalf("Expected `kill-my-vibe` but got %q", 
container.Model.MetaData["dont"]) + } + if container.Model.AccessLevel != Private { + t.Fatalf("Expected Access Level to be Private but got %q", container.Model.AccessLevel) + } + if container.Model.LeaseStatus != Unlocked { + t.Fatalf("Expected Container Lease to be Unlocked but was: %s", container.Model.LeaseStatus) + } + + // then update the ACL + _, err = containersClient.SetAccessControl(ctx, containerName, SetAccessControlInput{ + AccessLevel: Blob, + }) + if err != nil { + t.Fatal(fmt.Errorf("error updating ACL's: %s", err)) + } + + // give azure some time to replicate + time.Sleep(2 * time.Second) + + // then assert that + container, err = containersClient.GetProperties(ctx, containerName, GetPropertiesInput{}) + if err != nil { + t.Fatal(fmt.Errorf("Error re-retrieving: %s", err)) + } + if container.Model.AccessLevel != Blob { + t.Fatalf("Expected Access Level to be Blob but got %q", container.Model.AccessLevel) + } + if len(container.Model.MetaData) != 1 { + t.Fatalf("Expected 1 item in the metadata but got: %s", container.Model.MetaData) + } + if container.Model.LeaseStatus != Unlocked { + t.Fatalf("Expected Container Lease to be Unlocked but was: %s", container.Model.LeaseStatus) + } + + // acquire a lease for 30s + acquireLeaseInput := AcquireLeaseInput{ + LeaseDuration: 30, + } + acquireLeaseResp, err := containersClient.AcquireLease(ctx, containerName, acquireLeaseInput) + if err != nil { + t.Fatalf("Error acquiring lease: %s", err) + } + if acquireLeaseResp.Model == nil { + t.Fatalf("acquiring lease: `model` was nil") + } + t.Logf("[DEBUG] Lease ID: %s", acquireLeaseResp.Model.LeaseID) + + // we should then be able to update the ID + t.Logf("[DEBUG] Changing lease..") + updateLeaseInput := ChangeLeaseInput{ + ExistingLeaseID: acquireLeaseResp.Model.LeaseID, + ProposedLeaseID: "aaaabbbb-aaaa-bbbb-cccc-aaaabbbbcccc", + } + updateLeaseResp, err := containersClient.ChangeLease(ctx, containerName, updateLeaseInput) + if err != nil { + t.Fatalf("changing 
lease: %+v", err) + } + if updateLeaseResp.Model == nil { + t.Fatalf("changing lease: `model` was nil") + } + + // then renew it + _, err = containersClient.RenewLease(ctx, containerName, RenewLeaseInput{ + LeaseId: updateLeaseResp.Model.LeaseID, + }) + if err != nil { + t.Fatalf("Error renewing lease: %s", err) + } + + // and then give it a timeout + breakPeriod := 20 + breakLeaseInput := BreakLeaseInput{ + LeaseID: updateLeaseResp.Model.LeaseID, + BreakPeriod: &breakPeriod, + } + breakLeaseResp, err := containersClient.BreakLease(ctx, containerName, breakLeaseInput) + if err != nil { + t.Fatalf("breaking lease: %+v", err) + } + if breakLeaseResp.Model == nil { + t.Fatalf("breaking lease: `model` was nil") + } + if breakLeaseResp.Model.LeaseTime == 0 { + t.Fatalf("Lease broke immediately when should have waited: %d", breakLeaseResp.Model.LeaseTime) + } + + // and finally ditch it + _, err = containersClient.ReleaseLease(ctx, containerName, ReleaseLeaseInput{ + LeaseId: updateLeaseResp.Model.LeaseID, + }) + if err != nil { + t.Fatalf("Error releasing lease: %s", err) + } + + t.Logf("[DEBUG] Listing blobs in the container..") + listInput := ListBlobsInput{} + listResult, err := containersClient.ListBlobs(ctx, containerName, listInput) + if err != nil { + t.Fatalf("listing blobs: %+v", err) + } + if listResult.Model == nil { + t.Fatalf("listing blobs: `model` was nil") + } + + if len(listResult.Model.Blobs.Blobs) != 0 { + t.Fatalf("Expected there to be no blobs in the container but got %d", len(listResult.Model.Blobs.Blobs)) + } + + t.Logf("[DEBUG] Deleting..") + if _, err = containersClient.Delete(ctx, containerName); err != nil { + t.Fatal(fmt.Errorf("Error deleting: %s", err)) + } +} diff --git a/storage/2023-11-03/blob/containers/list_blobs.go b/storage/2023-11-03/blob/containers/list_blobs.go new file mode 100644 index 0000000..f3bd467 --- /dev/null +++ b/storage/2023-11-03/blob/containers/list_blobs.go @@ -0,0 +1,169 @@ +package containers + +import ( + 
"context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type ListBlobsInput struct { + Delimiter *string + Include *[]Dataset + Marker *string + MaxResults *int + Prefix *string +} + +type ListBlobsResponse struct { + HttpResponse *client.Response + Model *ListBlobsResult +} + +type ListBlobsResult struct { + Delimiter string `xml:"Delimiter"` + Marker string `xml:"Marker"` + MaxResults int `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker,omitempty"` + Prefix string `xml:"Prefix"` + Blobs Blobs `xml:"Blobs"` +} + +type Blobs struct { + Blobs []BlobDetails `xml:"Blob"` + BlobPrefix *BlobPrefix `xml:"BlobPrefix"` +} + +type BlobDetails struct { + Name string `xml:"Name"` + Deleted bool `xml:"Deleted,omitempty"` + MetaData map[string]interface{} `xml:"Metadata,omitempty"` + Properties *BlobProperties `xml:"Properties,omitempty"` + Snapshot *string `xml:"Snapshot,omitempty"` +} + +type BlobProperties struct { + AccessTier *string `xml:"AccessTier,omitempty"` + AccessTierInferred *bool `xml:"AccessTierInferred,omitempty"` + AccessTierChangeTime *string `xml:"AccessTierChangeTime,omitempty"` + BlobType *string `xml:"BlobType,omitempty"` + BlobSequenceNumber *string `xml:"x-ms-blob-sequence-number,omitempty"` + CacheControl *string `xml:"Cache-Control,omitempty"` + ContentEncoding *string `xml:"ContentEncoding,omitempty"` + ContentLanguage *string `xml:"Content-Language,omitempty"` + ContentLength *int64 `xml:"Content-Length,omitempty"` + ContentMD5 *string `xml:"Content-MD5,omitempty"` + ContentType *string `xml:"Content-Type,omitempty"` + CopyCompletionTime *string `xml:"CopyCompletionTime,omitempty"` + CopyId *string `xml:"CopyId,omitempty"` + CopyStatus *string `xml:"CopyStatus,omitempty"` + CopySource *string `xml:"CopySource,omitempty"` + CopyProgress *string `xml:"CopyProgress,omitempty"` + CopyStatusDescription *string `xml:"CopyStatusDescription,omitempty"` + 
CreationTime *string `xml:"CreationTime,omitempty"` + ETag *string `xml:"Etag,omitempty"` + DeletedTime *string `xml:"DeletedTime,omitempty"` + IncrementalCopy *bool `xml:"IncrementalCopy,omitempty"` + LastModified *string `xml:"Last-Modified,omitempty"` + LeaseDuration *string `xml:"LeaseDuration,omitempty"` + LeaseState *string `xml:"LeaseState,omitempty"` + LeaseStatus *string `xml:"LeaseStatus,omitempty"` + RemainingRetentionDays *string `xml:"RemainingRetentionDays,omitempty"` + ServerEncrypted *bool `xml:"ServerEncrypted,omitempty"` +} + +type BlobPrefix struct { + Name string `xml:"Name"` +} + +// ListBlobs lists the blobs matching the specified query within the specified Container +func (c Client) ListBlobs(ctx context.Context, containerName string, input ListBlobsInput) (resp ListBlobsResponse, err error) { + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + if input.MaxResults != nil && (*input.MaxResults <= 0 || *input.MaxResults > 5000) { + return resp, fmt.Errorf("`input.MaxResults` can either be nil or between 1 and 5000") + } + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: listBlobsOptions{ + delimiter: input.Delimiter, + include: input.Include, + marker: input.Marker, + maxResults: input.MaxResults, + prefix: input.Prefix, + }, + Path: fmt.Sprintf("/%s", containerName), + } + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if err = resp.HttpResponse.Unmarshal(&resp.Model); err != nil { + err = fmt.Errorf("unmarshaling response: %+v", err) + return + } + } + + return +} + +var _ client.Options = listBlobsOptions{} + +type 
listBlobsOptions struct { + delimiter *string + include *[]Dataset + marker *string + maxResults *int + prefix *string +} + +func (o listBlobsOptions) ToHeaders() *client.Headers { + return nil +} + +func (o listBlobsOptions) ToOData() *odata.Query { + return nil +} + +func (o listBlobsOptions) ToQuery() *client.QueryParams { + query := containerOptions{}.ToQuery() + query.Append("comp", "list") + + if o.delimiter != nil { + query.Append("delimiter", *o.delimiter) + } + if o.include != nil { + vals := make([]string, 0) + for _, v := range *o.include { + vals = append(vals, string(v)) + } + include := strings.Join(vals, ",") + query.Append("include", include) + } + if o.marker != nil { + query.Append("marker", *o.marker) + } + if o.maxResults != nil { + query.Append("maxresults", fmt.Sprintf("%d", *o.maxResults)) + } + if o.prefix != nil { + query.Append("prefix", *o.prefix) + } + return query +} diff --git a/storage/2023-11-03/blob/containers/models.go b/storage/2023-11-03/blob/containers/models.go new file mode 100644 index 0000000..ddfe195 --- /dev/null +++ b/storage/2023-11-03/blob/containers/models.go @@ -0,0 +1,71 @@ +package containers + +type AccessLevel string + +var ( + // Blob specifies public read access for blobs. + // Blob data within this container can be read via anonymous request, + // but container data is not available. + // Clients cannot enumerate blobs within the container via anonymous request. + Blob AccessLevel = "blob" + + // Container specifies full public read access for container and blob data. + // Clients can enumerate blobs within the container via anonymous request, + // but cannot enumerate containers within the storage account. 
+ Container AccessLevel = "container" + + // Private specifies that container data is private to the account owner + Private AccessLevel = "" +) + +type ContainerProperties struct { + AccessLevel AccessLevel + LeaseStatus LeaseStatus + LeaseState LeaseState + LeaseDuration *LeaseDuration + MetaData map[string]string + HasImmutabilityPolicy bool + HasLegalHold bool +} + +type Dataset string + +var ( + Copy Dataset = "copy" + Deleted Dataset = "deleted" + MetaData Dataset = "metadata" + Snapshots Dataset = "snapshots" + UncommittedBlobs Dataset = "uncommittedblobs" +) + +type ErrorResponse struct { + Code *string `xml:"Code"` + Message *string `xml:"Message"` +} + +type LeaseDuration string + +var ( + // If this lease is for a Fixed Duration + Fixed LeaseDuration = "fixed" + + // If this lease is for an Indefinite Duration + Infinite LeaseDuration = "infinite" +) + +type LeaseState string + +var ( + Available LeaseState = "available" + Breaking LeaseState = "breaking" + Broken LeaseState = "broken" + Expired LeaseState = "expired" + Leased LeaseState = "leased" +) + +type LeaseStatus string + +var ( + Locked LeaseStatus = "locked" + Unlocked LeaseStatus = "unlocked" +) diff --git a/storage/2023-11-03/blob/containers/options.go b/storage/2023-11-03/blob/containers/options.go new file mode 100644 index 0000000..73fadf1 --- /dev/null +++ b/storage/2023-11-03/blob/containers/options.go @@ -0,0 +1,35 @@ +package containers + +import ( + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +var _ client.Options = containerOptions{} + +type containerOptions struct { + metaData map[string]string +} + +func (o containerOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + + metaDataHeaders := make(map[string]interface{}) + metadata.SetIntoHeaders(metaDataHeaders, o.metaData) + for k, v := range metaDataHeaders { + headers.Append(k, v.(string)) + } 
+ + return headers +} + +func (containerOptions) ToOData() *odata.Query { + return nil +} + +func (containerOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("restype", "container") + return out +} diff --git a/storage/2023-11-03/blob/containers/resource_id.go b/storage/2023-11-03/blob/containers/resource_id.go new file mode 100644 index 0000000..599ef93 --- /dev/null +++ b/storage/2023-11-03/blob/containers/resource_id.go @@ -0,0 +1,12 @@ +package containers + +import ( + "fmt" +) + +// GetResourceManagerResourceID returns the Resource Manager specific +// ResourceID for a specific Storage Container +func (c Client) GetResourceManagerResourceID(subscriptionID, resourceGroup, accountName, containerName string) string { + fmtStr := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Storage/storageAccounts/%s/blobServices/default/containers/%s" + return fmt.Sprintf(fmtStr, subscriptionID, resourceGroup, accountName, containerName) +} diff --git a/storage/2023-11-03/blob/containers/resource_id_test.go b/storage/2023-11-03/blob/containers/resource_id_test.go new file mode 100644 index 0000000..e46e5e8 --- /dev/null +++ b/storage/2023-11-03/blob/containers/resource_id_test.go @@ -0,0 +1,13 @@ +package containers + +import ( + "testing" +) + +func TestGetResourceManagerResourceID(t *testing.T) { + actual := Client{}.GetResourceManagerResourceID("11112222-3333-4444-5555-666677778888", "group1", "account1", "container1") + expected := "/subscriptions/11112222-3333-4444-5555-666677778888/resourceGroups/group1/providers/Microsoft.Storage/storageAccounts/account1/blobServices/default/containers/container1" + if actual != expected { + t.Fatalf("Expected the Resource Manager Resource ID to be %q but got %q", expected, actual) + } +} diff --git a/storage/2023-11-03/blob/containers/set_acl.go b/storage/2023-11-03/blob/containers/set_acl.go new file mode 100644 index 0000000..6420537 --- /dev/null +++ 
b/storage/2023-11-03/blob/containers/set_acl.go @@ -0,0 +1,86 @@ +package containers + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type SetAccessControlInput struct { + AccessLevel AccessLevel + LeaseId string +} + +type SetAccessControlResponse struct { + HttpResponse *client.Response +} + +// SetAccessControl sets the Access Control for a Container; when input.LeaseId is set it is sent as the lease condition for the request +// NOTE: The SetAccessControl operation only supports Shared Key authorization. +func (c Client) SetAccessControl(ctx context.Context, containerName string, input SetAccessControlInput) (resp SetAccessControlResponse, err error) { + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPut, + OptionsObject: setAccessControlListOptions{ + accessLevel: input.AccessLevel, + leaseId: input.LeaseId, + }, + Path: fmt.Sprintf("/%s", containerName), + } + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +var _ client.Options = setAccessControlListOptions{} + +type setAccessControlListOptions struct { + accessLevel AccessLevel + leaseId string +} + +func (o setAccessControlListOptions) ToHeaders() *client.Headers { + headers := containerOptions{}.ToHeaders() + + // If this header is not included in the request, container data is private to the account owner. + if o.accessLevel != Private { + headers.Append("x-ms-blob-public-access", string(o.accessLevel)) + } + + // If specified, Set Container ACL only succeeds if the container’s lease is active and matches this ID. 
+ // If there is no active lease or the ID does not match, 412 (Precondition Failed) is returned. + if o.leaseId != "" { + headers.Append("x-ms-lease-id", o.leaseId) + } + + return headers +} + +func (o setAccessControlListOptions) ToOData() *odata.Query { + return nil +} + +func (o setAccessControlListOptions) ToQuery() *client.QueryParams { + query := containerOptions{}.ToQuery() + query.Append("comp", "acl") + return query +} diff --git a/storage/2023-11-03/blob/containers/set_metadata.go b/storage/2023-11-03/blob/containers/set_metadata.go new file mode 100644 index 0000000..578c9a1 --- /dev/null +++ b/storage/2023-11-03/blob/containers/set_metadata.go @@ -0,0 +1,86 @@ +package containers + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type SetMetaDataInput struct { + MetaData map[string]string + LeaseId string +} + +type SetMetaDataResponse struct { + HttpResponse *client.Response +} + +// SetMetaData sets the specified MetaData on the Container without a Lease ID +func (c Client) SetMetaData(ctx context.Context, containerName string, input SetMetaDataInput) (resp SetMetaDataResponse, err error) { + if containerName == "" { + return resp, fmt.Errorf("`containerName` cannot be an empty string") + } + if err := metadata.Validate(input.MetaData); err != nil { + return resp, fmt.Errorf("`input.MetaData` is not valid: %s", err) + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPut, + OptionsObject: setMetaDataOptions{ + metaData: input.MetaData, + leaseId: input.LeaseId, + }, + Path: fmt.Sprintf("/%s", containerName), + } + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) 
+ if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +var _ client.Options = setMetaDataOptions{} + +type setMetaDataOptions struct { + metaData map[string]string + leaseId string +} + +func (o setMetaDataOptions) ToHeaders() *client.Headers { + headers := containerOptions{ + metaData: o.metaData, + }.ToHeaders() + + // If specified, Get Container Properties only succeeds if the container’s lease is active and matches this ID. + // If there is no active lease or the ID does not match, 412 (Precondition Failed) is returned. + if o.leaseId != "" { + headers.Append("x-ms-lease-id", o.leaseId) + } + + return headers +} + +func (o setMetaDataOptions) ToOData() *odata.Query { + return nil +} + +func (o setMetaDataOptions) ToQuery() *client.QueryParams { + query := containerOptions{}.ToQuery() + query.Append("comp", "metadata") + return query +} diff --git a/storage/2023-11-03/blob/containers/version.go b/storage/2023-11-03/blob/containers/version.go new file mode 100644 index 0000000..1806c68 --- /dev/null +++ b/storage/2023-11-03/blob/containers/version.go @@ -0,0 +1,4 @@ +package containers + +const apiVersion = "2023-11-03" +const componentName = "blob/containers" diff --git a/storage/2023-11-03/datalakestore/filesystems/README.md b/storage/2023-11-03/datalakestore/filesystems/README.md new file mode 100644 index 0000000..fe1c49a --- /dev/null +++ b/storage/2023-11-03/datalakestore/filesystems/README.md @@ -0,0 +1,79 @@ +## Data Lake Storage Gen2 File Systems SDK for API version 2020-08-04 + +This package allows you to interact with the Data Lake Storage Gen2 File Systems API + +### Supported Authorizers + +* Azure Active Directory (for the Resource Endpoint `https://storage.azure.com`) + +### Example Usage + +```go +package main + +import ( + "context" + "fmt" + "os" + + "github.com/hashicorp/go-azure-helpers/authentication" + "github.com/hashicorp/go-azure-helpers/sender" + 
"github.com/tombuildsstuff/giovanni/storage/2020-08-04/datalakestore/filesystems" +) + +func Example() error { + accountName := "storageaccount1" + fileSystemName := "filesystem1" + + builder := &authentication.Builder{ + SubscriptionID: os.Getenv("ARM_SUBSCRIPTION_ID"), + ClientID: os.Getenv("ARM_CLIENT_ID"), + ClientSecret: os.Getenv("ARM_CLIENT_SECRET"), + TenantID: os.Getenv("ARM_TENANT_ID"), + Environment: os.Getenv("ARM_ENVIRONMENT"), + + // Feature Toggles + SupportsClientSecretAuth: true, + } + + c, err := builder.Build() + if err != nil { + return fmt.Errorf("Error building AzureRM Client: %s", err) + } + + env, err := authentication.DetermineEnvironment(c.Environment) + if err != nil { + return err + } + + oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, c.TenantID) + if err != nil { + return err + } + + // OAuthConfigForTenant returns a pointer, which can be nil. + if oauthConfig == nil { + return fmt.Errorf("Unable to configure OAuthConfig for tenant %s", c.TenantID) + } + + sender := sender.BuildSender("AzureRM") + ctx := context.Background() + + storageAuth, err := config.GetAuthorizationToken(sender, oauthConfig, "https://storage.azure.com/") + if err != nil { + return fmt.Errorf("Error retrieving Authorization Token") + } + + fileSystemsClient, err := filesystems.NewWithBaseUri(fmt.Sprintf("https://%s.dfs.core.windows.net", accountName)) + fileSystemsClient.Client.WithAuthorizer(storageAuth) + + input := filesystems.CreateInput{ + Properties: map[string]string{}, + } + if _, err = fileSystemsClient.Create(ctx, fileSystemName, input); err != nil { + return fmt.Errorf("Error creating: %s", err) + } + + return nil +} +``` \ No newline at end of file diff --git a/storage/2023-11-03/datalakestore/filesystems/client.go b/storage/2023-11-03/datalakestore/filesystems/client.go new file mode 100644 index 0000000..27e96d1 --- /dev/null +++ b/storage/2023-11-03/datalakestore/filesystems/client.go @@ -0,0 +1,23 @@ +package filesystems + 
+import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/dataplane/storage" +) + +// Client is the base client for Data Lake Store Filesystems. +type Client struct { + Client *storage.BaseClient +} + +func NewWithBaseUri(baseUri string) (*Client, error) { + baseClient, err := storage.NewBaseClient(baseUri, componentName, apiVersion) + if err != nil { + return nil, fmt.Errorf("building base client: %+v", err) + } + + return &Client{ + Client: baseClient, + }, nil +} diff --git a/storage/2023-11-03/datalakestore/filesystems/create.go b/storage/2023-11-03/datalakestore/filesystems/create.go new file mode 100644 index 0000000..d0d4bab --- /dev/null +++ b/storage/2023-11-03/datalakestore/filesystems/create.go @@ -0,0 +1,79 @@ +package filesystems + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type CreateInput struct { + // A map of base64-encoded strings to store as user-defined properties with the File System + // Note that items may only contain ASCII characters in the ISO-8859-1 character set. 
+ // This automatically gets converted to a comma-separated list of name and + // value pairs before sending to the API + Properties map[string]string +} + +type CreateResponse struct { + HttpResponse *client.Response +} + +// Create creates a Data Lake Store Gen2 FileSystem within a Storage Account +func (c Client) Create(ctx context.Context, fileSystemName string, input CreateInput) (resp CreateResponse, err error) { + + if fileSystemName == "" { + return resp, fmt.Errorf("`fileSystemName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: createOptions{ + properties: input.Properties, + }, + + Path: fmt.Sprintf("/%s", fileSystemName), + } + + req, err := c.Client.NewRequest(ctx, opts) + + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type createOptions struct { + properties map[string]string +} + +func (o createOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + props := buildProperties(o.properties) + if props != "" { + headers.Append("x-ms-properties", props) + } + + return headers +} + +func (createOptions) ToOData() *odata.Query { + return nil +} + +func (createOptions) ToQuery() *client.QueryParams { + return fileSystemOptions{}.ToQuery() +} diff --git a/storage/2023-11-03/datalakestore/filesystems/create_test.go b/storage/2023-11-03/datalakestore/filesystems/create_test.go new file mode 100644 index 0000000..1e017bb --- /dev/null +++ b/storage/2023-11-03/datalakestore/filesystems/create_test.go @@ -0,0 +1,69 @@ +package filesystems + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + 
"github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +func TestCreateHasNoTagsByDefault(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + fileSystemName := fmt.Sprintf("acctestfs-%s", testhelpers.RandomString()) + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindBlobStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + + fileSystemsClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.%s.%s", accountName, "dfs", *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(fileSystemsClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + t.Logf("[DEBUG] Creating an empty File System..") + input := CreateInput{ + Properties: map[string]string{}, + } + if _, err = fileSystemsClient.Create(ctx, fileSystemName, input); err != nil { + t.Fatal(fmt.Errorf("Error creating: %s", err)) + } + + t.Logf("[DEBUG] Retrieving the Properties..") + props, err := fileSystemsClient.GetProperties(ctx, fileSystemName) + if err != nil { + t.Fatal(fmt.Errorf("Error getting properties: %s", err)) + } + + if len(props.Properties) != 0 { + t.Fatalf("Expected 0 properties by default but got %d", len(props.Properties)) + } + + t.Logf("[DEBUG] Deleting File System..") + if _, err := fileSystemsClient.Delete(ctx, fileSystemName); err != nil { + 
t.Fatalf("Error deleting: %s", err) + } +} diff --git a/storage/2023-11-03/datalakestore/filesystems/delete.go b/storage/2023-11-03/datalakestore/filesystems/delete.go new file mode 100644 index 0000000..a6d33b3 --- /dev/null +++ b/storage/2023-11-03/datalakestore/filesystems/delete.go @@ -0,0 +1,43 @@ +package filesystems + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" +) + +type DeleteResponse struct { + HttpResponse *client.Response +} + +// Delete deletes a Data Lake Store Gen2 FileSystem within a Storage Account +func (c Client) Delete(ctx context.Context, fileSystemName string) (resp DeleteResponse, err error) { + + if fileSystemName == "" { + return resp, fmt.Errorf("`fileSystemName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodDelete, + OptionsObject: fileSystemOptions{}, + Path: fmt.Sprintf("/%s", fileSystemName), + } + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} diff --git a/storage/2023-11-03/datalakestore/filesystems/helpers.go b/storage/2023-11-03/datalakestore/filesystems/helpers.go new file mode 100644 index 0000000..605ed42 --- /dev/null +++ b/storage/2023-11-03/datalakestore/filesystems/helpers.go @@ -0,0 +1,40 @@ +package filesystems + +import ( + "fmt" + "strings" +) + +func buildProperties(input map[string]string) string { + // properties has to be a comma-separated key-value pair + properties := make([]string, 0) + + for k, v := range input { + properties = append(properties, fmt.Sprintf("%s=%s", k, v)) + } + + return strings.Join(properties, ",") +} + +func parseProperties(input string) (*map[string]string, error) { + properties := 
make(map[string]string) + if input == "" { + return &properties, nil + } + + // properties is a comma-separated list of key-value pairs + splitProperties := strings.Split(input, ",") + for _, propertyRaw := range splitProperties { + // because these are base64-encoded they're likely to end in at least one = + // as such we can't string split on that -_- + position := strings.Index(propertyRaw, "=") + if position < 0 { + return nil, fmt.Errorf("Expected there to be an equals in the key value pair: %q", propertyRaw) + } + + key := propertyRaw[0:position] + value := propertyRaw[position+1:] + properties[key] = value + } + return &properties, nil +} diff --git a/storage/2023-11-03/datalakestore/filesystems/helpers_test.go b/storage/2023-11-03/datalakestore/filesystems/helpers_test.go new file mode 100644 index 0000000..efc9009 --- /dev/null +++ b/storage/2023-11-03/datalakestore/filesystems/helpers_test.go @@ -0,0 +1,76 @@ +package filesystems + +import ( + "reflect" + "testing" +) + +func TestParseProperties(t *testing.T) { + testData := []struct { + name string + input string + expected map[string]string + expectError bool + }{ + { + name: "no items", + input: "", + expected: map[string]string{}, + expectError: false, + }, + { + name: "invalid item", + input: "hello", + expectError: true, + }, + { + name: "single item", + input: "hello=world", + expected: map[string]string{ + "hello": "world", + }, + }, + { + name: "single-item-base64", + input: "hello=aGVsbG8=", + expected: map[string]string{ + "hello": "aGVsbG8=", + }, + expectError: false, + }, + { + name: "single-item-base64-multipleequals", + input: "hello=d29uZGVybGFuZA==", + expected: map[string]string{ + "hello": "d29uZGVybGFuZA==", + }, + expectError: false, + }, + { + name: "multiple-items-base64", + input: "hello=d29uZGVybGFuZA==,private=ZXll", + + expected: map[string]string{ + "hello": "d29uZGVybGFuZA==", + "private": "ZXll", + }, + expectError: false, + }, + } + + for _, testCase := range testData { + 
t.Logf("[DEBUG] Test %q", testCase.name) + + actual, err := parseProperties(testCase.input) + if err != nil { + if testCase.expectError { + continue + } + + t.Fatalf("[DEBUG] Didn't expect an error but got %s", err) + } + if !reflect.DeepEqual(testCase.expected, *actual) { + t.Fatalf("Expected %+v but got %+v", testCase.expected, *actual) + } + } +} diff --git a/storage/2023-11-03/datalakestore/filesystems/lifecycle_test.go b/storage/2023-11-03/datalakestore/filesystems/lifecycle_test.go new file mode 100644 index 0000000..a05681c --- /dev/null +++ b/storage/2023-11-03/datalakestore/filesystems/lifecycle_test.go @@ -0,0 +1,99 @@ +package filesystems + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +func TestLifecycle(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + fileSystemName := fmt.Sprintf("acctestfs-%s", testhelpers.RandomString()) + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindBlobStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + fileSystemsClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.%s.%s", accountName, "dfs", *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(fileSystemsClient.Client, testData, 
auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + t.Logf("[DEBUG] Creating an empty File System..") + input := CreateInput{ + Properties: map[string]string{ + "hello": "aGVsbG8=", + }, + } + if _, err = fileSystemsClient.Create(ctx, fileSystemName, input); err != nil { + t.Fatal(fmt.Errorf("Error creating: %s", err)) + } + + t.Logf("[DEBUG] Retrieving the Properties..") + props, err := fileSystemsClient.GetProperties(ctx, fileSystemName) + if err != nil { + t.Fatal(fmt.Errorf("Error getting properties: %s", err)) + } + + if len(props.Properties) != 1 { + t.Fatalf("Expected 1 properties by default but got %d", len(props.Properties)) + } + if props.Properties["hello"] != "aGVsbG8=" { + t.Fatalf("Expected `hello` to be `aGVsbG8=` but got %q", props.Properties["hello"]) + } + + t.Logf("[DEBUG] Updating the properties..") + setInput := SetPropertiesInput{ + Properties: map[string]string{ + "hello": "d29uZGVybGFuZA==", + "private": "ZXll", + }, + } + if _, err := fileSystemsClient.SetProperties(ctx, fileSystemName, setInput); err != nil { + t.Fatalf("Error setting properties: %s", err) + } + + t.Logf("[DEBUG] Re-Retrieving the Properties..") + props, err = fileSystemsClient.GetProperties(ctx, fileSystemName) + if err != nil { + t.Fatal(fmt.Errorf("Error getting properties: %s", err)) + } + if len(props.Properties) != 2 { + t.Fatalf("Expected 2 properties by default but got %d", len(props.Properties)) + } + if props.Properties["hello"] != "d29uZGVybGFuZA==" { + t.Fatalf("Expected `hello` to be `d29uZGVybGFuZA==` but got %q", props.Properties["hello"]) + } + if props.Properties["private"] != "ZXll" { + t.Fatalf("Expected `private` to be `ZXll` but got %q", props.Properties["private"]) + } + + t.Logf("[DEBUG] Deleting File System..") + if _, err := fileSystemsClient.Delete(ctx, fileSystemName); err != nil { + t.Fatalf("Error deleting: %s", err) + } +} diff --git a/storage/2023-11-03/datalakestore/filesystems/options.go 
b/storage/2023-11-03/datalakestore/filesystems/options.go new file mode 100644 index 0000000..aee33a0 --- /dev/null +++ b/storage/2023-11-03/datalakestore/filesystems/options.go @@ -0,0 +1,32 @@ +package filesystems + +import ( + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +var _ client.Options = fileSystemOptions{} + +type fileSystemOptions struct { + properties map[string]string +} + +func (o fileSystemOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + props := buildProperties(o.properties) + if props != "" { + headers.Append("x-ms-properties", props) + } + + return headers +} + +func (fileSystemOptions) ToOData() *odata.Query { + return nil +} + +func (fileSystemOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("resource", "filesystem") + return out +} diff --git a/storage/2023-11-03/datalakestore/filesystems/properties_get.go b/storage/2023-11-03/datalakestore/filesystems/properties_get.go new file mode 100644 index 0000000..e8e4a67 --- /dev/null +++ b/storage/2023-11-03/datalakestore/filesystems/properties_get.go @@ -0,0 +1,68 @@ +package filesystems + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" +) + +type GetPropertiesResponse struct { + HttpResponse *client.Response + + // A map of base64-encoded strings to store as user-defined properties with the File System + // Note that items may only contain ASCII characters in the ISO-8859-1 character set. + // This automatically gets converted to a comma-separated list of name and + // value pairs before sending to the API + Properties map[string]string + + // Is Hierarchical Namespace Enabled? 
+ NamespaceEnabled bool +} + +// GetProperties gets the properties for a Data Lake Store Gen2 FileSystem within a Storage Account +func (c Client) GetProperties(ctx context.Context, fileSystemName string) (resp GetPropertiesResponse, err error) { + + if fileSystemName == "" { + return resp, fmt.Errorf("`fileSystemName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodHead, + OptionsObject: fileSystemOptions{}, + Path: fmt.Sprintf("/%s", fileSystemName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + + propertiesRaw := resp.HttpResponse.Header.Get("x-ms-properties") + var properties *map[string]string + properties, err = parseProperties(propertiesRaw) + if err != nil { + return + } + + resp.Properties = *properties + resp.NamespaceEnabled = strings.EqualFold(resp.HttpResponse.Header.Get("x-ms-namespace-enabled"), "true") + + } + return +} diff --git a/storage/2023-11-03/datalakestore/filesystems/properties_set.go b/storage/2023-11-03/datalakestore/filesystems/properties_set.go new file mode 100644 index 0000000..82c259a --- /dev/null +++ b/storage/2023-11-03/datalakestore/filesystems/properties_set.go @@ -0,0 +1,99 @@ +package filesystems + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type SetPropertiesInput struct { + // A map of base64-encoded strings to store as user-defined properties with the File System + // Note that items may only contain ASCII characters in the ISO-8859-1 character set. 
+ // This automatically gets converted to a comma-separated list of name and + // value pairs before sending to the API + Properties map[string]string + + // Optional - A date and time value. + // Specify this header to perform the operation only if the resource has been modified since the specified date and time. + IfModifiedSince *string + + // Optional - A date and time value. + // Specify this header to perform the operation only if the resource has not been modified since the specified date and time. + IfUnmodifiedSince *string +} + +type SetPropertiesResponse struct { + HttpResponse *client.Response +} + +// SetProperties sets the Properties for a Data Lake Store Gen2 FileSystem within a Storage Account +func (c Client) SetProperties(ctx context.Context, fileSystemName string, input SetPropertiesInput) (resp SetPropertiesResponse, err error) { + + if fileSystemName == "" { + return resp, fmt.Errorf("`fileSystemName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + OptionsObject: setPropertiesOptions{ + properties: input.Properties, + ifUnmodifiedSince: input.IfUnmodifiedSince, + ifModifiedSince: input.IfModifiedSince, + }, + + Path: fmt.Sprintf("/%s", fileSystemName), + } + + req, err := c.Client.NewRequest(ctx, opts) + + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type setPropertiesOptions struct { + properties map[string]string + ifModifiedSince *string + ifUnmodifiedSince *string +} + +func (o setPropertiesOptions) ToHeaders() *client.Headers { + + headers := &client.Headers{} + props := buildProperties(o.properties) + if props != "" { + headers.Append("x-ms-properties", props) + } + + if o.ifModifiedSince != nil { + 
headers.Append("If-Modified-Since", *o.ifModifiedSince) + } + if o.ifUnmodifiedSince != nil { + headers.Append("If-Unmodified-Since", *o.ifUnmodifiedSince) + } + + return headers +} + +func (setPropertiesOptions) ToOData() *odata.Query { + return nil +} + +func (setPropertiesOptions) ToQuery() *client.QueryParams { + return fileSystemOptions{}.ToQuery() +} diff --git a/storage/2023-11-03/datalakestore/filesystems/resource_id.go b/storage/2023-11-03/datalakestore/filesystems/resource_id.go new file mode 100644 index 0000000..1090704 --- /dev/null +++ b/storage/2023-11-03/datalakestore/filesystems/resource_id.go @@ -0,0 +1,12 @@ +package filesystems + +import ( + "fmt" +) + +// GetResourceManagerResourceID returns the Resource Manager specific +// ResourceID for a specific dfs filesystem +func (c Client) GetResourceManagerResourceID(subscriptionID, resourceGroup, accountName, fileSystemName string) string { + fmtStr := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Storage/storageAccounts/%s/blobServices/default/containers/%s" + return fmt.Sprintf(fmtStr, subscriptionID, resourceGroup, accountName, fileSystemName) +} diff --git a/storage/2023-11-03/datalakestore/filesystems/resource_id_test.go b/storage/2023-11-03/datalakestore/filesystems/resource_id_test.go new file mode 100644 index 0000000..441438f --- /dev/null +++ b/storage/2023-11-03/datalakestore/filesystems/resource_id_test.go @@ -0,0 +1,13 @@ +package filesystems + +import ( + "testing" +) + +func TestGetResourceManagerResourceID(t *testing.T) { + actual := Client{}.GetResourceManagerResourceID("11112222-3333-4444-5555-666677778888", "group1", "account1", "container1") + expected := "/subscriptions/11112222-3333-4444-5555-666677778888/resourceGroups/group1/providers/Microsoft.Storage/storageAccounts/account1/blobServices/default/containers/container1" + if actual != expected { + t.Fatalf("Expected the Resource Manager Resource ID to be %q but got %q", expected, actual) + } +} diff --git 
a/storage/2023-11-03/datalakestore/filesystems/version.go b/storage/2023-11-03/datalakestore/filesystems/version.go new file mode 100644 index 0000000..954c9ca --- /dev/null +++ b/storage/2023-11-03/datalakestore/filesystems/version.go @@ -0,0 +1,4 @@ +package filesystems + +const apiVersion = "2023-11-03" +const componentName = "datalakestore/filesystems" diff --git a/storage/2023-11-03/datalakestore/paths/client.go b/storage/2023-11-03/datalakestore/paths/client.go new file mode 100644 index 0000000..27e70be --- /dev/null +++ b/storage/2023-11-03/datalakestore/paths/client.go @@ -0,0 +1,22 @@ +package paths + +import ( + "fmt" + "github.com/hashicorp/go-azure-sdk/sdk/client/dataplane/storage" +) + +// Client is the base client for Data Lake Storage Path +type Client struct { + Client *storage.BaseClient +} + +func NewWithBaseUri(baseUri string) (*Client, error) { + baseClient, err := storage.NewBaseClient(baseUri, componentName, apiVersion) + if err != nil { + return nil, fmt.Errorf("building base client: %+v", err) + } + + return &Client{ + Client: baseClient, + }, nil +} diff --git a/storage/2023-11-03/datalakestore/paths/create.go b/storage/2023-11-03/datalakestore/paths/create.go new file mode 100644 index 0000000..c95ea17 --- /dev/null +++ b/storage/2023-11-03/datalakestore/paths/create.go @@ -0,0 +1,72 @@ +package paths + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type PathResource string + +const PathResourceFile PathResource = "file" +const PathResourceDirectory PathResource = "directory" + +type CreateInput struct { + Resource PathResource +} + +type CreateResponse struct { + HttpResponse *client.Response +} + +// Create creates a Data Lake Store Gen2 Path within a Storage Account +func (c Client) Create(ctx context.Context, fileSystemName string, path string, input CreateInput) (resp CreateResponse, err error) { + + if fileSystemName == "" { + return 
resp, fmt.Errorf("`fileSystemName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: CreateInput{ + Resource: input.Resource, + }, + + Path: fmt.Sprintf("/%s/%s", fileSystemName, path), + } + + req, err := c.Client.NewRequest(ctx, opts) + + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return resp, err + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return resp, err + } + + return +} + +func (c CreateInput) ToHeaders() *client.Headers { + return nil +} + +func (c CreateInput) ToOData() *odata.Query { + return nil +} + +func (c CreateInput) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("resource", string(c.Resource)) + return out +} diff --git a/storage/2023-11-03/datalakestore/paths/create_test.go b/storage/2023-11-03/datalakestore/paths/create_test.go new file mode 100644 index 0000000..5fd2706 --- /dev/null +++ b/storage/2023-11-03/datalakestore/paths/create_test.go @@ -0,0 +1,82 @@ +package paths + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/datalakestore/filesystems" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +func TestCreateDirectory(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + fileSystemName := fmt.Sprintf("acctestfs-%s", testhelpers.RandomString()) + path := 
"test" + + testData, err := client.BuildTestResourcesWithHns(ctx, resourceGroup, accountName, storageaccounts.KindBlobStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + + baseUri := fmt.Sprintf("https://%s.%s.%s", accountName, "dfs", *domainSuffix) + + fileSystemsClient, err := filesystems.NewWithBaseUri(baseUri) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(fileSystemsClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + t.Logf("[DEBUG] Creating an empty File System..") + fileSystemInput := filesystems.CreateInput{ + Properties: map[string]string{}, + } + if _, err = fileSystemsClient.Create(ctx, fileSystemName, fileSystemInput); err != nil { + t.Fatal(fmt.Errorf("error creating: %s", err)) + } + + t.Logf("[DEBUG] Creating path..") + + pathsClient, err := NewWithBaseUri(baseUri) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(pathsClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + input := CreateInput{ + Resource: PathResourceDirectory, + } + + if _, err = pathsClient.Create(ctx, fileSystemName, path, input); err != nil { + t.Fatal(fmt.Errorf("error creating path: %s", err)) + } + + t.Logf("[DEBUG] Deleting File System..") + if _, err := fileSystemsClient.Delete(ctx, fileSystemName); err != nil { + t.Fatalf("Error deleting: %s", err) + } +} diff --git a/storage/2023-11-03/datalakestore/paths/delete.go b/storage/2023-11-03/datalakestore/paths/delete.go new file mode 100644 index 0000000..4ea6e93 --- /dev/null +++ b/storage/2023-11-03/datalakestore/paths/delete.go @@ -0,0 
+1,43 @@ +package paths + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" +) + +type DeleteResponse struct { + HttpResponse *client.Response +} + +// Delete deletes a Data Lake Store Gen2 Path within a Storage Account FileSystem +func (c Client) Delete(ctx context.Context, fileSystemName string, path string) (resp DeleteResponse, err error) { + + if fileSystemName == "" { + return resp, fmt.Errorf("`fileSystemName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + OptionsObject: nil, + Path: fmt.Sprintf("/%s/%s", fileSystemName, path), + } + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} diff --git a/storage/2023-11-03/datalakestore/paths/helpers.go b/storage/2023-11-03/datalakestore/paths/helpers.go new file mode 100644 index 0000000..e6ef6f4 --- /dev/null +++ b/storage/2023-11-03/datalakestore/paths/helpers.go @@ -0,0 +1,15 @@ +package paths + +import ( + "fmt" +) + +func parsePathResource(input string) (PathResource, error) { + switch input { + case "file": + return PathResourceFile, nil + case "directory": + return PathResourceDirectory, nil + } + return "", fmt.Errorf("unhandled path resource type %q", input) +} diff --git a/storage/2023-11-03/datalakestore/paths/lifecycle_test.go b/storage/2023-11-03/datalakestore/paths/lifecycle_test.go new file mode 100644 index 0000000..2d1021d --- /dev/null +++ b/storage/2023-11-03/datalakestore/paths/lifecycle_test.go @@ -0,0 +1,120 @@ +package paths + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + 
"github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/datalakestore/filesystems" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +func TestLifecycle(t *testing.T) { + const defaultACLString = "user::rwx,group::r-x,other::---" + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + fileSystemName := fmt.Sprintf("acctestfs-%s", testhelpers.RandomString()) + path := "test" + + testData, err := client.BuildTestResourcesWithHns(ctx, resourceGroup, accountName, storageaccounts.KindBlobStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + + baseUri := fmt.Sprintf("https://%s.%s.%s", accountName, "dfs", *domainSuffix) + + fileSystemsClient, err := filesystems.NewWithBaseUri(baseUri) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + if err := client.PrepareWithSharedKeyAuth(fileSystemsClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + pathsClient, err := NewWithBaseUri(baseUri) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + if err := client.PrepareWithSharedKeyAuth(pathsClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + t.Logf("[DEBUG] Creating an empty File System..") + fileSystemInput := filesystems.CreateInput{} + if _, err = fileSystemsClient.Create(ctx, fileSystemName, fileSystemInput); err != nil { + t.Fatal(fmt.Errorf("error creating: 
%s", err)) + } + + t.Logf("[DEBUG] Creating folder 'test' ..") + input := CreateInput{ + Resource: PathResourceDirectory, + } + if _, err = pathsClient.Create(ctx, fileSystemName, path, input); err != nil { + t.Fatal(fmt.Errorf("error creating: %s", err)) + } + + t.Logf("[DEBUG] Getting properties for folder 'test' ..") + props, err := pathsClient.GetProperties(ctx, fileSystemName, path, GetPropertiesInput{action: GetPropertiesActionGetAccessControl}) + if err != nil { + t.Fatal(fmt.Errorf("error getting properties: %s", err)) + } + t.Logf("[DEBUG] Props.Owner: %q", props.Owner) + t.Logf("[DEBUG] Props.Group: %q", props.Group) + t.Logf("[DEBUG] Props.ACL: %q", props.ACL) + t.Logf("[DEBUG] Props.ETag: %q", props.ETag) + t.Logf("[DEBUG] Props.LastModified: %q", props.LastModified) + if props.ACL != defaultACLString { + t.Fatal(fmt.Errorf("Expected Default ACL %q, got %q", defaultACLString, props.ACL)) + } + + newACL := "user::rwx,group::r-x,other::r-x,default:user::rwx,default:group::r-x,default:other::---" + accessControlInput := SetAccessControlInput{ + ACL: &newACL, + } + t.Logf("[DEBUG] Setting Access Control for folder 'test' ..") + if _, err = pathsClient.SetAccessControl(ctx, fileSystemName, path, accessControlInput); err != nil { + t.Fatal(fmt.Errorf("error setting Access Control %s", err)) + } + + t.Logf("[DEBUG] Getting properties for folder 'test' (2) ..") + props, err = pathsClient.GetProperties(ctx, fileSystemName, path, GetPropertiesInput{action: GetPropertiesActionGetAccessControl}) + if err != nil { + t.Fatal(fmt.Errorf("error getting properties (2): %s", err)) + } + if props.ACL != newACL { + t.Fatal(fmt.Errorf("expected new ACL %q, got %q", newACL, props.ACL)) + } + + t.Logf("[DEBUG] Deleting path 'test' ..") + if _, err = pathsClient.Delete(ctx, fileSystemName, path); err != nil { + t.Fatal(fmt.Errorf("error deleting path: %s", err)) + } + + t.Logf("[DEBUG] Getting properties for folder 'test' (3) ..") + props, err = pathsClient.GetProperties(ctx, 
fileSystemName, path, GetPropertiesInput{action: GetPropertiesActionGetAccessControl}) + if err == nil { + t.Fatal(fmt.Errorf("didn't get error getting properties after deleting path (3)")) + } + + t.Logf("[DEBUG] Deleting File System..") + if _, err := fileSystemsClient.Delete(ctx, fileSystemName); err != nil { + t.Fatalf("Error deleting filesystem: %s", err) + } +} diff --git a/storage/2023-11-03/datalakestore/paths/properties_get.go b/storage/2023-11-03/datalakestore/paths/properties_get.go new file mode 100644 index 0000000..2bb81af --- /dev/null +++ b/storage/2023-11-03/datalakestore/paths/properties_get.go @@ -0,0 +1,103 @@ +package paths + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type GetPropertiesResponse struct { + HttpResponse *client.Response + + ETag string + LastModified time.Time + // ResourceType is only returned for GetPropertiesActionGetStatus requests + ResourceType PathResource + Owner string + Group string + // ACL is only returned for GetPropertiesActionGetAccessControl requests + ACL string +} + +type GetPropertiesInput struct { + action GetPropertiesAction +} + +type GetPropertiesAction string + +const ( + GetPropertiesActionGetStatus GetPropertiesAction = "getStatus" + GetPropertiesActionGetAccessControl GetPropertiesAction = "getAccessControl" +) + +// GetProperties gets the properties for a Data Lake Store Gen2 Path in a FileSystem within a Storage Account +func (c Client) GetProperties(ctx context.Context, fileSystemName string, path string, input GetPropertiesInput) (resp GetPropertiesResponse, err error) { + if fileSystemName == "" { + return resp, fmt.Errorf("`fileSystemName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodHead, + OptionsObject: getPropertyOptions{ + action: 
input.action, + }, + Path: fmt.Sprintf("/%s/%s", fileSystemName, path), + } + + req, err := c.Client.NewRequest(ctx, opts) + + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return resp, err + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return resp, err + } + + if resp.HttpResponse != nil { + resp.ResourceType = PathResource(resp.HttpResponse.Header.Get("x-ms-resource-type")) + resp.ETag = resp.HttpResponse.Header.Get("ETag") + + if lastModifiedRaw := resp.HttpResponse.Header.Get("Last-Modified"); lastModifiedRaw != "" { + lastModified, err := time.Parse(time.RFC1123, lastModifiedRaw) + if err != nil { + return GetPropertiesResponse{}, err + } + resp.LastModified = lastModified + } + + resp.Owner = resp.HttpResponse.Header.Get("x-ms-owner") + resp.Group = resp.HttpResponse.Header.Get("x-ms-group") + resp.ACL = resp.HttpResponse.Header.Get("x-ms-acl") + } + return +} + +type getPropertyOptions struct { + action GetPropertiesAction +} + +func (g getPropertyOptions) ToHeaders() *client.Headers { + return nil +} + +func (g getPropertyOptions) ToOData() *odata.Query { + return nil +} + +func (g getPropertyOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("action", string(g.action)) + return out +} diff --git a/storage/2023-11-03/datalakestore/paths/properties_set.go b/storage/2023-11-03/datalakestore/paths/properties_set.go new file mode 100644 index 0000000..055f199 --- /dev/null +++ b/storage/2023-11-03/datalakestore/paths/properties_set.go @@ -0,0 +1,103 @@ +package paths + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type SetAccessControlInput struct { + Owner *string + Group *string + ACL *string + + // Optional - A date and time value. 
+ // Specify this header to perform the operation only if the resource has been modified since the specified date and time. + IfModifiedSince *string + + // Optional - A date and time value. + // Specify this header to perform the operation only if the resource has not been modified since the specified date and time. + IfUnmodifiedSince *string +} + +type SetPropertiesResponse struct { + HttpResponse *client.Response +} + +// SetProperties sets the access control properties for a Data Lake Store Gen2 Path within a Storage Account File System +func (c Client) SetAccessControl(ctx context.Context, fileSystemName string, path string, input SetAccessControlInput) (resp SetPropertiesResponse, err error) { + + if fileSystemName == "" { + return resp, fmt.Errorf("`fileSystemName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + OptionsObject: setPropertyOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", fileSystemName, path), + } + + req, err := c.Client.NewRequest(ctx, opts) + + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return resp, err + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return resp, err + } + + return +} + +type setPropertyOptions struct { + input SetAccessControlInput +} + +func (s setPropertyOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + + if s.input.ACL != nil { + headers.Append("x-ms-acl", *s.input.ACL) + } + + if s.input.Owner != nil { + headers.Append("x-ms-owner", *s.input.Owner) + } + + if s.input.Group != nil { + headers.Append("x-ms-group", *s.input.Group) + } + + if s.input.IfModifiedSince != nil { + headers.Append("If-Modified-Since", *s.input.IfModifiedSince) + } + + if s.input.IfUnmodifiedSince != nil { + headers.Append("If-Unmodified-Since", 
*s.input.IfUnmodifiedSince) + } + + return headers +} + +func (s setPropertyOptions) ToOData() *odata.Query { + return nil +} + +func (s setPropertyOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("action", "setAccessControl") + return out +} diff --git a/storage/2023-11-03/datalakestore/paths/version.go b/storage/2023-11-03/datalakestore/paths/version.go new file mode 100644 index 0000000..70c22f9 --- /dev/null +++ b/storage/2023-11-03/datalakestore/paths/version.go @@ -0,0 +1,4 @@ +package paths + +const apiVersion = "2023-11-03" +const componentName = "datalakestore/paths" diff --git a/storage/2023-11-03/file/directories/README.md b/storage/2023-11-03/file/directories/README.md new file mode 100644 index 0000000..25474c8 --- /dev/null +++ b/storage/2023-11-03/file/directories/README.md @@ -0,0 +1,48 @@ +## File Storage Directories SDK for API version 2023-11-03 + +This package allows you to interact with the Directories File Storage API + +### Supported Authorizers + +* Azure Active Directory (for the Resource Endpoint `https://storage.azure.com`) +* SharedKeyLite (Blob, File & Queue) + +### Limitations + +* At this time the headers `x-ms-file-permission` and `x-ms-file-attributes` are hard-coded (to `inherit` and `None`, respectively). + +### Example Usage + +```go +package main + +import ( + "context" + "fmt" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/file/directories" +) + +func Example() error { + accountName := "storageaccount1" + storageAccountKey := "ABC123...." 
+ shareName := "myshare" + directoryName := "myfiles" + + storageAuth := autorest.NewSharedKeyLiteAuthorizer(accountName, storageAccountKey) + directoriesClient := directories.New() + directoriesClient.Client.Authorizer = storageAuth + + ctx := context.TODO() + metadata := map[string]string{ + "hello": "world", + } + if _, err := directoriesClient.Create(ctx, accountName, shareName, directoryName, metadata); err != nil { + return fmt.Errorf("Error creating Directory: %s", err) + } + + return nil +} +``` \ No newline at end of file diff --git a/storage/2023-11-03/file/directories/api.go b/storage/2023-11-03/file/directories/api.go new file mode 100644 index 0000000..e462657 --- /dev/null +++ b/storage/2023-11-03/file/directories/api.go @@ -0,0 +1,13 @@ +package directories + +import ( + "context" +) + +type StorageDirectory interface { + Delete(ctx context.Context, shareName, path string) (resp DeleteResponse, err error) + GetMetaData(ctx context.Context, shareName, path string) (resp GetMetaDataResponse, err error) + SetMetaData(ctx context.Context, shareName, path string, input SetMetaDataInput) (resp SetMetaDataResponse, err error) + Create(ctx context.Context, shareName, path string, input CreateDirectoryInput) (resp CreateDirectoryResponse, err error) + Get(ctx context.Context, shareName, path string) (resp GetResponse, err error) +} diff --git a/storage/2023-11-03/file/directories/client.go b/storage/2023-11-03/file/directories/client.go new file mode 100644 index 0000000..86f9fd1 --- /dev/null +++ b/storage/2023-11-03/file/directories/client.go @@ -0,0 +1,22 @@ +package directories + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/dataplane/storage" +) + +// Client is the base client for File Storage Shares. 
+type Client struct { + Client *storage.BaseClient +} + +func NewWithBaseUri(baseUri string) (*Client, error) { + baseClient, err := storage.NewBaseClient(baseUri, componentName, apiVersion) + if err != nil { + return nil, fmt.Errorf("building base client: %+v", err) + } + return &Client{ + Client: baseClient, + }, nil +} diff --git a/storage/2023-11-03/file/directories/create.go b/storage/2023-11-03/file/directories/create.go new file mode 100644 index 0000000..7f63f9f --- /dev/null +++ b/storage/2023-11-03/file/directories/create.go @@ -0,0 +1,115 @@ +package directories + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type CreateDirectoryInput struct { + // The time at which this file was created at - if omitted, this'll be set to "now" + // This maps to the `x-ms-file-creation-time` field. + // ... Yes I know it says File not Directory, I didn't design the API. + CreatedAt *time.Time + + // The time at which this file was last modified - if omitted, this'll be set to "now" + // This maps to the `x-ms-file-last-write-time` field. + // ... Yes I know it says File not Directory, I didn't design the API. + LastModified *time.Time + + // MetaData is a mapping of key value pairs which should be assigned to this directory + MetaData map[string]string +} + +type CreateDirectoryResponse struct { + HttpResponse *client.Response +} + +// Create creates a new directory under the specified share or parent directory. 
+func (c Client) Create(ctx context.Context, shareName, path string, input CreateDirectoryInput) (resp CreateDirectoryResponse, err error) { + + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if err = metadata.Validate(input.MetaData); err != nil { + return resp, fmt.Errorf("`input.MetaData` is not valid: %s", err) + } + + if path == "" { + return resp, fmt.Errorf("`path` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: CreateOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s", shareName, path), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type CreateOptions struct { + input CreateDirectoryInput +} + +func (c CreateOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + + if len(c.input.MetaData) > 0 { + headers.Merge(metadata.SetMetaDataHeaders(c.input.MetaData)) + } + + var coalesceDate = func(input *time.Time, defaultVal string) string { + if input == nil { + return defaultVal + } + return input.Format(time.RFC1123) + } + + // ... Yes I know these say File not Directory, I didn't design the API. 
+ headers.Append("x-ms-file-permission", "inherit") // TODO: expose this in future + headers.Append("x-ms-file-attributes", "None") // TODO: expose this in future + headers.Append("x-ms-file-creation-time", coalesceDate(c.input.CreatedAt, "now")) + headers.Append("x-ms-file-last-write-time", coalesceDate(c.input.LastModified, "now")) + + return headers +} + +func (c CreateOptions) ToOData() *odata.Query { + return nil +} + +func (c CreateOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("restype", "directory") + return out +} diff --git a/storage/2023-11-03/file/directories/delete.go b/storage/2023-11-03/file/directories/delete.go new file mode 100644 index 0000000..fb96108 --- /dev/null +++ b/storage/2023-11-03/file/directories/delete.go @@ -0,0 +1,55 @@ +package directories + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" +) + +type DeleteResponse struct { + HttpResponse *client.Response +} + +// Delete removes the specified empty directory +// Note that the directory must be empty before it can be deleted. 
+func (c Client) Delete(ctx context.Context, shareName, path string) (resp DeleteResponse, err error) { + + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if path == "" { + return resp, fmt.Errorf("`path` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodDelete, + OptionsObject: directoriesOptions{}, + Path: fmt.Sprintf("/%s/%s", shareName, path), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} diff --git a/storage/2023-11-03/file/directories/get.go b/storage/2023-11-03/file/directories/get.go new file mode 100644 index 0000000..5c900c4 --- /dev/null +++ b/storage/2023-11-03/file/directories/get.go @@ -0,0 +1,69 @@ +package directories + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type GetResponse struct { + HttpResponse *client.Response + + // A set of name-value pairs that contain metadata for the directory. + MetaData map[string]string + + // The value of this header is set to true if the directory metadata is completely + // encrypted using the specified algorithm. Otherwise, the value is set to false. + DirectoryMetaDataEncrypted bool +} + +// Get returns all system properties for the specified directory, +// and can also be used to check the existence of a directory. 
+func (c Client) Get(ctx context.Context, shareName, path string) (resp GetResponse, err error) { + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if path == "" { + return resp, fmt.Errorf("`path` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: directoriesOptions{}, + Path: fmt.Sprintf("/%s/%s", shareName, path), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Header != nil { + resp.MetaData = metadata.ParseFromHeaders(resp.HttpResponse.Header) + } + resp.DirectoryMetaDataEncrypted = strings.EqualFold(resp.HttpResponse.Header.Get("x-ms-server-encrypted"), "true") + } + + return +} diff --git a/storage/2023-11-03/file/directories/lifecycle_test.go b/storage/2023-11-03/file/directories/lifecycle_test.go new file mode 100644 index 0000000..663b6fb --- /dev/null +++ b/storage/2023-11-03/file/directories/lifecycle_test.go @@ -0,0 +1,123 @@ +package directories + +import ( + "context" + "fmt" + "log" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/2020-08-04/file/shares" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +var StorageFile = Client{} + +func TestDirectoriesLifeCycle(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err 
:= testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + shareName := fmt.Sprintf("share-%d", testhelpers.RandomInt()) + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + sharesClient, err := shares.NewWithBaseUri(fmt.Sprintf("https://%s.file.%s", accountName, *domainSuffix)) + if err := client.PrepareWithSharedKeyAuth(sharesClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + directoriesClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.file.%s", accountName, *domainSuffix)) + if err := client.PrepareWithSharedKeyAuth(directoriesClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + input := shares.CreateInput{ + QuotaInGB: 1, + } + _, err = sharesClient.Create(ctx, shareName, input) + if err != nil { + t.Fatalf("Error creating fileshare: %s", err) + } + defer sharesClient.Delete(ctx, shareName, shares.DeleteInput{DeleteSnapshots: true}) + + metaData := map[string]string{ + "hello": "world", + } + + log.Printf("[DEBUG] Creating Top Level..") + createInput := CreateDirectoryInput{ + MetaData: metaData, + } + if _, err := directoriesClient.Create(ctx, shareName, "hello", createInput); err != nil { + t.Fatalf("Error creating Top Level Directory: %s", err) + } + + log.Printf("[DEBUG] Creating Inner..") + if _, err := directoriesClient.Create(ctx, shareName, "hello/there", createInput); err != nil { + t.Fatalf("Error creating Inner Directory: %s", err) + } + + log.Printf("[DEBUG] 
Retrieving share") + innerDir, err := directoriesClient.Get(ctx, shareName, "hello/there") + if err != nil { + t.Fatalf("Error retrieving Inner Directory: %s", err) + } + + if innerDir.DirectoryMetaDataEncrypted != true { + t.Fatalf("Expected MetaData to be encrypted but got: %t", innerDir.DirectoryMetaDataEncrypted) + } + + if len(innerDir.MetaData) != 1 { + t.Fatalf("Expected MetaData to contain 1 item but got %d", len(innerDir.MetaData)) + } + if innerDir.MetaData["hello"] != "world" { + t.Fatalf("Expected MetaData `hello` to be `world`: %s", innerDir.MetaData["hello"]) + } + + log.Printf("[DEBUG] Setting MetaData") + updatedMetaData := map[string]string{ + "panda": "pops", + } + if _, err := directoriesClient.SetMetaData(ctx, shareName, "hello/there", SetMetaDataInput{MetaData: updatedMetaData}); err != nil { + t.Fatalf("Error updating MetaData: %s", err) + } + + log.Printf("[DEBUG] Retrieving MetaData") + retrievedMetaData, err := directoriesClient.GetMetaData(ctx, shareName, "hello/there") + if err != nil { + t.Fatalf("Error retrieving the updated metadata: %s", err) + } + if len(retrievedMetaData.MetaData) != 1 { + t.Fatalf("Expected the updated metadata to have 1 item but got %d", len(retrievedMetaData.MetaData)) + } + if retrievedMetaData.MetaData["panda"] != "pops" { + t.Fatalf("Expected the metadata `panda` to be `pops` but got %q", retrievedMetaData.MetaData["panda"]) + } + + t.Logf("[DEBUG] Deleting Inner..") + if _, err := directoriesClient.Delete(ctx, shareName, "hello/there"); err != nil { + t.Fatalf("Error deleting Inner Directory: %s", err) + } + + t.Logf("[DEBUG] Deleting Top Level..") + if _, err := directoriesClient.Delete(ctx, shareName, "hello"); err != nil { + t.Fatalf("Error deleting Top Level Directory: %s", err) + } +} diff --git a/storage/2023-11-03/file/directories/metadata_get.go b/storage/2023-11-03/file/directories/metadata_get.go new file mode 100644 index 0000000..1911d39 --- /dev/null +++ 
b/storage/2023-11-03/file/directories/metadata_get.go @@ -0,0 +1,80 @@ +package directories + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type GetMetaDataResponse struct { + HttpResponse *client.Response + + MetaData map[string]string +} + +// GetMetaData returns all user-defined metadata for the specified directory +func (c Client) GetMetaData(ctx context.Context, shareName, path string) (resp GetMetaDataResponse, err error) { + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if path == "" { + return resp, fmt.Errorf("`path` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: GetMetaDataOptions{}, + Path: fmt.Sprintf("/%s/%s", shareName, path), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Header != nil { + resp.MetaData = metadata.ParseFromHeaders(resp.HttpResponse.Header) + } + } + + return +} + +type GetMetaDataOptions struct{} + +func (g GetMetaDataOptions) ToHeaders() *client.Headers { + return nil +} + +func (g GetMetaDataOptions) ToOData() *odata.Query { + return nil +} + +func (g GetMetaDataOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("restype", "directory") + out.Append("comp", "metadata") + return out +} diff --git 
a/storage/2023-11-03/file/directories/metadata_set.go b/storage/2023-11-03/file/directories/metadata_set.go new file mode 100644 index 0000000..965f5ff --- /dev/null +++ b/storage/2023-11-03/file/directories/metadata_set.go @@ -0,0 +1,90 @@ +package directories + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type SetMetaDataResponse struct { + HttpResponse *client.Response +} + +type SetMetaDataInput struct { + MetaData map[string]string +} + +// SetMetaData updates user defined metadata for the specified directory +func (c Client) SetMetaData(ctx context.Context, shareName, path string, input SetMetaDataInput) (resp SetMetaDataResponse, err error) { + + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if err := metadata.Validate(input.MetaData); err != nil { + return resp, fmt.Errorf("`metadata` is not valid: %s", err) + } + + if path == "" { + return resp, fmt.Errorf("`path` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPut, + OptionsObject: SetMetaDataOptions{ + metaData: input.MetaData, + }, + Path: fmt.Sprintf("/%s/%s", shareName, path), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type SetMetaDataOptions struct { + metaData map[string]string +} + +func (s SetMetaDataOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + + if 
len(s.metaData) > 0 {
+		headers.Merge(metadata.SetMetaDataHeaders(s.metaData))
+	}
+	return headers
+}
+
+func (s SetMetaDataOptions) ToOData() *odata.Query {
+	return nil
+}
+
+func (s SetMetaDataOptions) ToQuery() *client.QueryParams {
+	out := &client.QueryParams{}
+	out.Append("restype", "directory")
+	out.Append("comp", "metadata")
+	return out
+}
diff --git a/storage/2023-11-03/file/directories/options.go b/storage/2023-11-03/file/directories/options.go
new file mode 100644
index 0000000..9b324d5
--- /dev/null
+++ b/storage/2023-11-03/file/directories/options.go
@@ -0,0 +1,24 @@
+package directories
+
+import (
+	"github.com/hashicorp/go-azure-sdk/sdk/client"
+	"github.com/hashicorp/go-azure-sdk/sdk/odata"
+)
+
+var _ client.Options = directoriesOptions{}
+
+type directoriesOptions struct{}
+
+func (o directoriesOptions) ToHeaders() *client.Headers {
+	return &client.Headers{}
+}
+
+func (directoriesOptions) ToOData() *odata.Query {
+	return nil
+}
+
+func (directoriesOptions) ToQuery() *client.QueryParams {
+	out := &client.QueryParams{}
+	out.Append("restype", "directory")
+	return out
+}
diff --git a/storage/2023-11-03/file/directories/version.go b/storage/2023-11-03/file/directories/version.go
new file mode 100644
index 0000000..785860c
--- /dev/null
+++ b/storage/2023-11-03/file/directories/version.go
@@ -0,0 +1,5 @@
+package directories
+
+// APIVersion is the version of the API used for all Storage API Operations
+const apiVersion = "2023-11-03"
+const componentName = "file/directories"
diff --git a/storage/2023-11-03/file/files/README.md b/storage/2023-11-03/file/files/README.md
new file mode 100644
index 0000000..6ce6bbb
--- /dev/null
+++ b/storage/2023-11-03/file/files/README.md
@@ -0,0 +1,47 @@
+## File Storage Files SDK for API version 2023-11-03
+
+This package allows you to interact with the Files File Storage API
+
+### Supported Authorizers
+
+* Azure Active Directory (for the Resource Endpoint `https://storage.azure.com`)
+* SharedKeyLite 
(Blob, File & Queue)
+
+### Limitations
+
+* At this time the headers `x-ms-file-permission` and `x-ms-file-attributes` are hard-coded (to `inherit` and `None`, respectively).
+
+### Example Usage
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/tombuildsstuff/giovanni/storage/2023-11-03/file/files"
+)
+
+func Example() error {
+	accountName := "storageaccount1"
+	storageAccountKey := "ABC123...."
+	shareName := "myshare"
+	directoryName := "myfiles"
+	fileName := "example.txt"
+
+	storageAuth := autorest.NewSharedKeyLiteAuthorizer(accountName, storageAccountKey)
+	filesClient := files.New()
+	filesClient.Client.Authorizer = storageAuth
+
+	ctx := context.TODO()
+	input := files.CreateInput{}
+	if _, err := filesClient.Create(ctx, shareName, directoryName, fileName, input); err != nil {
+		return fmt.Errorf("Error creating File: %s", err)
+	}
+
+	return nil
+}
+```
\ No newline at end of file
diff --git a/storage/2023-11-03/file/files/api.go b/storage/2023-11-03/file/files/api.go
new file mode 100644
index 0000000..fa31271
--- /dev/null
+++ b/storage/2023-11-03/file/files/api.go
@@ -0,0 +1,25 @@
+package files
+
+import (
+	"context"
+	"os"
+	"time"
+)
+
+type StorageFile interface {
+	PutByteRange(ctx context.Context, shareName string, path string, fileName string, input PutByteRangeInput) (PutRangeResponse, error)
+	GetByteRange(ctx context.Context, shareName string, path string, fileName string, input GetByteRangeInput) (GetByteRangeResponse, error)
+	ClearByteRange(ctx context.Context, shareName string, path string, fileName string, input ClearByteRangeInput) (ClearByteRangeResponse, error)
+	SetProperties(ctx context.Context, shareName string, path string, fileName string, input SetPropertiesInput) (SetPropertiesResponse, error)
+	PutFile(ctx context.Context, shareName string, path string, fileName string, file *os.File, parallelism int) error
+	Copy(ctx context.Context, 
shareName, path, fileName string, input CopyInput) (CopyResponse, error)
+	SetMetaData(ctx context.Context, shareName string, path string, fileName string, input SetMetaDataInput) (SetMetaDataResponse, error)
+	GetMetaData(ctx context.Context, shareName string, path string, fileName string) (GetMetaDataResponse, error)
+	AbortCopy(ctx context.Context, shareName string, path string, fileName string, input CopyAbortInput) (CopyAbortResponse, error)
+	GetFile(ctx context.Context, shareName string, path string, fileName string, input GetFileInput) (GetFileResponse, error)
+	ListRanges(ctx context.Context, shareName, path, fileName string) (ListRangesResponse, error)
+	GetProperties(ctx context.Context, shareName string, path string, fileName string) (GetResponse, error)
+	Delete(ctx context.Context, shareName string, path string, fileName string) (DeleteResponse, error)
+	Create(ctx context.Context, shareName string, path string, fileName string, input CreateInput) (CreateResponse, error)
+	CopyAndWait(ctx context.Context, shareName, path, fileName string, input CopyInput, pollDuration time.Duration) (CopyResponse, error)
+}
diff --git a/storage/2023-11-03/file/files/client.go b/storage/2023-11-03/file/files/client.go
new file mode 100644
index 0000000..a875c77
--- /dev/null
+++ b/storage/2023-11-03/file/files/client.go
@@ -0,0 +1,22 @@
+package files
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/go-azure-sdk/sdk/client/dataplane/storage"
+)
+
+// Client is the base client for File Storage Files.
+type Client struct { + Client *storage.BaseClient +} + +func NewWithBaseUri(baseUri string) (*Client, error) { + baseClient, err := storage.NewBaseClient(baseUri, componentName, apiVersion) + if err != nil { + return nil, fmt.Errorf("building base client: %+v", err) + } + return &Client{ + Client: baseClient, + }, nil +} diff --git a/storage/2023-11-03/file/files/copy.go b/storage/2023-11-03/file/files/copy.go new file mode 100644 index 0000000..577f95d --- /dev/null +++ b/storage/2023-11-03/file/files/copy.go @@ -0,0 +1,116 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type CopyInput struct { + // Specifies the URL of the source file or blob, up to 2 KB in length. + // + // To copy a file to another file within the same storage account, you may use Shared Key to authenticate + // the source file. If you are copying a file from another storage account, or if you are copying a blob from + // the same storage account or another storage account, then you must authenticate the source file or blob using a + // shared access signature. If the source is a public blob, no authentication is required to perform the copy + // operation. A file in a share snapshot can also be specified as a copy source. + CopySource string + + MetaData map[string]string +} + +type CopyResponse struct { + HttpResponse *client.Response + + // The CopyID, which can be passed to AbortCopy to abort the copy. + CopyID string + + // Either `success` or `pending` + CopySuccess string +} + +// Copy copies a blob or file to a destination file within the storage account asynchronously. 
+func (c Client) Copy(ctx context.Context, shareName, path, fileName string, input CopyInput) (resp CopyResponse, err error) { + + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if fileName == "" { + return resp, fmt.Errorf("`fileName` cannot be an empty string") + } + + if input.CopySource == "" { + return resp, fmt.Errorf("`input.CopySource` cannot be an empty string") + } + + if err = metadata.Validate(input.MetaData); err != nil { + return resp, fmt.Errorf("`input.MetaData` is not valid: %s", err) + } + + if path != "" { + path = fmt.Sprintf("%s/", path) + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodPut, + OptionsObject: CopyOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s%s", shareName, path, fileName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Header != nil { + resp.CopyID = resp.HttpResponse.Header.Get("x-ms-copy-id") + resp.CopySuccess = resp.HttpResponse.Header.Get("x-ms-copy-status") + } + } + + return +} + +type CopyOptions struct { + input CopyInput +} + +func (c CopyOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + if len(c.input.MetaData) > 0 { + headers.Merge(metadata.SetMetaDataHeaders(c.input.MetaData)) + } + headers.Append("x-ms-copy-source", c.input.CopySource) + return headers +} + +func (c CopyOptions) ToOData() *odata.Query { + return nil +} + +func (c CopyOptions) ToQuery() *client.QueryParams { + return nil +} diff --git 
a/storage/2023-11-03/file/files/copy_abort.go b/storage/2023-11-03/file/files/copy_abort.go new file mode 100644 index 0000000..5561160 --- /dev/null +++ b/storage/2023-11-03/file/files/copy_abort.go @@ -0,0 +1,90 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type CopyAbortInput struct { + copyID string +} + +type CopyAbortResponse struct { + HttpResponse *client.Response +} + +// AbortCopy aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata +func (c Client) AbortCopy(ctx context.Context, shareName, path, fileName string, input CopyAbortInput) (resp CopyAbortResponse, err error) { + + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if fileName == "" { + return resp, fmt.Errorf("`fileName` cannot be an empty string") + } + + if input.copyID == "" { + return resp, fmt.Errorf("`copyID` cannot be an empty string") + } + + if path != "" { + path = fmt.Sprintf("%s/", path) + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + }, + HttpMethod: http.MethodPut, + OptionsObject: CopyAbortOptions{ + copyId: input.copyID, + }, + Path: fmt.Sprintf("/%s/%s%s", shareName, path, fileName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type CopyAbortOptions struct { + copyId string +} + +func (c CopyAbortOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("x-ms-copy-action", "abort") + 
return headers
+}
+
+func (c CopyAbortOptions) ToOData() *odata.Query {
+	return nil
+}
+
+func (c CopyAbortOptions) ToQuery() *client.QueryParams {
+	out := &client.QueryParams{}
+	out.Append("comp", "copy")
+	out.Append("copyid", c.copyId)
+	return out
+}
diff --git a/storage/2023-11-03/file/files/copy_wait.go b/storage/2023-11-03/file/files/copy_wait.go
new file mode 100644
index 0000000..ebd0748
--- /dev/null
+++ b/storage/2023-11-03/file/files/copy_wait.go
@@ -0,0 +1,47 @@
+package files
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+)
+
+const DefaultCopyPollDuration = 15 * time.Second
+
+// CopyAndWait is a convenience method which doesn't exist in the API, which copies the file and then waits for the copy to complete
+func (c Client) CopyAndWait(ctx context.Context, shareName, path, fileName string, input CopyInput, pollDuration time.Duration) (resp CopyResponse, err error) {
+	copyResult, e := c.Copy(ctx, shareName, path, fileName, input)
+	if e != nil {
+		resp.HttpResponse = copyResult.HttpResponse
+		err = fmt.Errorf("error copying: %s", e)
+		return
+	}
+
+	resp.CopyID = copyResult.CopyID
+
+	// since the API doesn't return a LRO, this is a hack which polls at the supplied interval, but should be sufficient
+	for true {
+		props, e := c.GetProperties(ctx, shareName, path, fileName)
+		if e != nil {
+			resp.HttpResponse = copyResult.HttpResponse
+			err = fmt.Errorf("error waiting for copy: %s", e)
+			return
+		}
+
+		switch strings.ToLower(props.CopyStatus) {
+		case "pending":
+			time.Sleep(pollDuration)
+			continue
+
+		case "success":
+			return
+
+		default:
+			err = fmt.Errorf("unexpected CopyStatus %q", props.CopyStatus)
+			return
+		}
+	}
+
+	return
+}
diff --git a/storage/2023-11-03/file/files/copy_wait_test.go b/storage/2023-11-03/file/files/copy_wait_test.go
new file mode 100644
index 0000000..f69dbac
--- /dev/null
+++ b/storage/2023-11-03/file/files/copy_wait_test.go
@@ -0,0 +1,150 @@
+package files
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"testing"
+	"time"
+
+	
"github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/2020-08-04/file/shares" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +func TestFilesCopyAndWaitFromURL(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + shareName := fmt.Sprintf("share-%d", testhelpers.RandomInt()) + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + sharesClient, err := shares.NewWithBaseUri(fmt.Sprintf("https://%s.file.%s", accountName, *domainSuffix)) + if err := client.PrepareWithSharedKeyAuth(sharesClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + input := shares.CreateInput{ + QuotaInGB: 10, + } + _, err = sharesClient.Create(ctx, shareName, input) + if err != nil { + t.Fatalf("Error creating fileshare: %s", err) + } + defer sharesClient.Delete(ctx, shareName, shares.DeleteInput{DeleteSnapshots: false}) + + filesClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.file.%s", accountName, *domainSuffix)) + if err := client.PrepareWithSharedKeyAuth(filesClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + copiedFileName := "ubuntu.iso" + copyInput := 
CopyInput{ + CopySource: "http://releases.ubuntu.com/14.04/ubuntu-14.04.6-desktop-amd64.iso", + } + + t.Logf("[DEBUG] Copy And Waiting..") + if _, err := filesClient.CopyAndWait(ctx, shareName, "", copiedFileName, copyInput, DefaultCopyPollDuration); err != nil { + t.Fatalf("Error copy & waiting: %s", err) + } + + t.Logf("[DEBUG] Asserting that the file's ready..") + + props, err := filesClient.GetProperties(ctx, shareName, "", copiedFileName) + if err != nil { + t.Fatalf("Error retrieving file: %s", err) + } + + if !strings.EqualFold(props.CopyStatus, "success") { + t.Fatalf("Expected the Copy Status to be `Success` but got %q", props.CopyStatus) + } +} + +func TestFilesCopyAndWaitFromBlob(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + shareName := fmt.Sprintf("share-%d", testhelpers.RandomInt()) + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + sharesClient, err := shares.NewWithBaseUri(fmt.Sprintf("https://%s.file.%s", accountName, *domainSuffix)) + if err := client.PrepareWithSharedKeyAuth(sharesClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + input := shares.CreateInput{ + QuotaInGB: 10, + } + _, err = sharesClient.Create(ctx, shareName, input) + if err != nil { + t.Fatalf("Error creating fileshare: %s", err) + } + defer sharesClient.Delete(ctx, shareName, shares.DeleteInput{DeleteSnapshots: 
false}) + + filesClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.file.%s", accountName, *domainSuffix)) + if err := client.PrepareWithSharedKeyAuth(filesClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + originalFileName := "ubuntu.iso" + copiedFileName := "ubuntu-copied.iso" + copyInput := CopyInput{ + CopySource: "http://releases.ubuntu.com/14.04/ubuntu-14.04.6-desktop-amd64.iso", + } + t.Logf("[DEBUG] Copy And Waiting the original file..") + if _, err := filesClient.CopyAndWait(ctx, shareName, "", originalFileName, copyInput, DefaultCopyPollDuration); err != nil { + t.Fatalf("Error copy & waiting: %s", err) + } + + t.Logf("[DEBUG] Now copying that blob..") + duplicateInput := CopyInput{ + CopySource: fmt.Sprintf("%s/%s/%s", endpoints.GetFileEndpoint(*domainSuffix, accountName), shareName, originalFileName), + } + if _, err := filesClient.CopyAndWait(ctx, shareName, "", copiedFileName, duplicateInput, DefaultCopyPollDuration); err != nil { + t.Fatalf("Error copying duplicate: %s", err) + } + + t.Logf("[DEBUG] Asserting that the file's ready..") + props, err := filesClient.GetProperties(ctx, shareName, "", copiedFileName) + if err != nil { + t.Fatalf("Error retrieving file: %s", err) + } + + if !strings.EqualFold(props.CopyStatus, "success") { + t.Fatalf("Expected the Copy Status to be `Success` but got %q", props.CopyStatus) + } +} diff --git a/storage/2023-11-03/file/files/create.go b/storage/2023-11-03/file/files/create.go new file mode 100644 index 0000000..7c08a85 --- /dev/null +++ b/storage/2023-11-03/file/files/create.go @@ -0,0 +1,158 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type CreateInput struct { + // This header specifies the maximum size for the file, up to 1 
TiB. + ContentLength int64 + + // The MIME content type of the file + // If not specified, the default type is application/octet-stream. + ContentType *string + + // Specifies which content encodings have been applied to the file. + // This value is returned to the client when the Get File operation is performed + // on the file resource and can be used to decode file content. + ContentEncoding *string + + // Specifies the natural languages used by this resource. + ContentLanguage *string + + // The File service stores this value but does not use or modify it. + CacheControl *string + + // Sets the file's MD5 hash. + ContentMD5 *string + + // Sets the file’s Content-Disposition header. + ContentDisposition *string + + // The time at which this file was created at - if omitted, this'll be set to "now" + // This maps to the `x-ms-file-creation-time` field. + CreatedAt *time.Time + + // The time at which this file was last modified - if omitted, this'll be set to "now" + // This maps to the `x-ms-file-last-write-time` field. + LastModified *time.Time + + // MetaData is a mapping of key value pairs which should be assigned to this file + MetaData map[string]string +} + +type CreateResponse struct { + HttpResponse *client.Response +} + +// Create creates a new file or replaces a file. 
+func (c Client) Create(ctx context.Context, shareName, path, fileName string, input CreateInput) (resp CreateResponse, err error) { + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if fileName == "" { + return resp, fmt.Errorf("`fileName` cannot be an empty string") + } + + if err = metadata.Validate(input.MetaData); err != nil { + return resp, fmt.Errorf("`input.MetaData` is not valid: %s", err) + } + + if path != "" { + path = fmt.Sprintf("%s/", path) + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: CreateOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s%s", shareName, path, fileName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type CreateOptions struct { + input CreateInput +} + +func (c CreateOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + + var coalesceDate = func(input *time.Time, defaultVal string) string { + if input == nil { + return defaultVal + } + + return input.Format(time.RFC1123) + } + + if len(c.input.MetaData) > 0 { + headers.Merge(metadata.SetMetaDataHeaders(c.input.MetaData)) + } + + headers.Append("x-ms-content-length", strconv.Itoa(int(c.input.ContentLength))) + headers.Append("x-ms-type", "file") + + headers.Append("x-ms-file-permission", "inherit") // TODO: expose this in future + headers.Append("x-ms-file-attributes", "None") // TODO: expose this in future + headers.Append("x-ms-file-creation-time", coalesceDate(c.input.CreatedAt, "now")) + 
headers.Append("x-ms-file-last-write-time", coalesceDate(c.input.LastModified, "now")) + + if c.input.ContentDisposition != nil { + headers.Append("x-ms-content-disposition", *c.input.ContentDisposition) + } + + if c.input.ContentEncoding != nil { + headers.Append("x-ms-content-encoding", *c.input.ContentEncoding) + } + + if c.input.ContentMD5 != nil { + headers.Append("x-ms-content-md5", *c.input.ContentMD5) + } + + if c.input.ContentType != nil { + headers.Append("x-ms-content-type", *c.input.ContentType) + } + + return headers +} + +func (c CreateOptions) ToOData() *odata.Query { + return nil +} + +func (c CreateOptions) ToQuery() *client.QueryParams { + return nil +} diff --git a/storage/2023-11-03/file/files/delete.go b/storage/2023-11-03/file/files/delete.go new file mode 100644 index 0000000..d5e3240 --- /dev/null +++ b/storage/2023-11-03/file/files/delete.go @@ -0,0 +1,58 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" +) + +type DeleteResponse struct { + HttpResponse *client.Response +} + +// Delete immediately deletes the file from the File Share. 
+func (c Client) Delete(ctx context.Context, shareName, path, fileName string) (resp DeleteResponse, err error) {
+
+	if shareName == "" {
+		return resp, fmt.Errorf("`shareName` cannot be an empty string")
+	}
+
+	if strings.ToLower(shareName) != shareName {
+		return resp, fmt.Errorf("`shareName` must be a lower-cased string")
+	}
+
+	if fileName == "" {
+		return resp, fmt.Errorf("`fileName` cannot be an empty string")
+	}
+
+	if path != "" {
+		path = fmt.Sprintf("%s/", path) // trailing slash only - the leading slash comes from the Path format below
+	}
+
+	opts := client.RequestOptions{
+		ContentType: "application/xml; charset=utf-8",
+		ExpectedStatusCodes: []int{
+			http.StatusAccepted,
+		},
+		HttpMethod:    http.MethodDelete,
+		OptionsObject: nil,
+		Path:          fmt.Sprintf("/%s/%s%s", shareName, path, fileName),
+	}
+
+	req, err := c.Client.NewRequest(ctx, opts)
+	if err != nil {
+		err = fmt.Errorf("building request: %+v", err)
+		return
+	}
+
+	resp.HttpResponse, err = req.Execute(ctx)
+	if err != nil {
+		err = fmt.Errorf("executing request: %+v", err)
+		return
+	}
+
+	return
+}
diff --git a/storage/2023-11-03/file/files/lifecycle_test.go b/storage/2023-11-03/file/files/lifecycle_test.go
new file mode 100644
index 0000000..14eca41
--- /dev/null
+++ b/storage/2023-11-03/file/files/lifecycle_test.go
@@ -0,0 +1,169 @@
+package files
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts"
+	"github.com/hashicorp/go-azure-sdk/sdk/auth"
+	"github.com/tombuildsstuff/giovanni/storage/2020-08-04/file/shares"
+	"github.com/tombuildsstuff/giovanni/storage/internal/testhelpers"
+)
+
+var _ StorageFile = Client{}
+
+func TestFilesLifeCycle(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour)
+	defer cancel()
+
+	client, err := testhelpers.Build(ctx, t)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt())
+	accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString())
+	
shareName := fmt.Sprintf("share-%d", testhelpers.RandomInt()) + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + sharesClient, err := shares.NewWithBaseUri(fmt.Sprintf("https://%s.file.%s", accountName, *domainSuffix)) + if err := client.PrepareWithSharedKeyAuth(sharesClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + input := shares.CreateInput{ + QuotaInGB: 1, + } + _, err = sharesClient.Create(ctx, shareName, input) + if err != nil { + t.Fatalf("Error creating fileshare: %s", err) + } + + defer sharesClient.Delete(ctx, shareName, shares.DeleteInput{DeleteSnapshots: false}) + + filesClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.file.%s", accountName, *domainSuffix)) + if err := client.PrepareWithSharedKeyAuth(filesClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + fileName := "bled5.png" + contentEncoding := "application/vnd+panda" + + t.Logf("[DEBUG] Creating Top Level File..") + createInput := CreateInput{ + ContentLength: 1024, + ContentEncoding: &contentEncoding, + } + if _, err := filesClient.Create(ctx, shareName, "", fileName, createInput); err != nil { + t.Fatalf("Error creating Top-Level File: %s", err) + } + + t.Logf("[DEBUG] Retrieving Properties for the Top-Level File..") + file, err := filesClient.GetProperties(ctx, shareName, "", fileName) + if err != nil { + t.Fatalf("Error retrieving Top-Level File: %s", err) + } + + if *file.ContentLength != 1024 { + t.Fatalf("Expected the Content-Length to be 1024 but got %d", *file.ContentLength) + } + + if file.ContentEncoding != contentEncoding { + 
t.Fatalf("Expected the Content-Encoding to be %q but got %q", contentEncoding, file.ContentEncoding)
+	}
+
+	updatedSize := int64(2048)
+	updatedEncoding := "application/vnd+pandas2"
+	updatedInput := SetPropertiesInput{
+		ContentEncoding: &updatedEncoding,
+		ContentLength:   updatedSize,
+		MetaData: map[string]string{
+			"bingo": "bango",
+		},
+	}
+	t.Logf("[DEBUG] Setting Properties for the Top-Level File..")
+	if _, err := filesClient.SetProperties(ctx, shareName, "", fileName, updatedInput); err != nil {
+		t.Fatalf("Error setting properties: %s", err)
+	}
+
+	t.Logf("[DEBUG] Re-retrieving Properties for the Top-Level File..")
+	file, err = filesClient.GetProperties(ctx, shareName, "", fileName)
+	if err != nil {
+		t.Fatalf("Error retrieving Top-Level File: %s", err)
+	}
+
+	if *file.ContentLength != 2048 {
+		t.Fatalf("Expected the Content-Length to be 2048 but got %d", *file.ContentLength)
+	}
+
+	if file.ContentEncoding != updatedEncoding {
+		t.Fatalf("Expected the Content-Encoding to be %q but got %q", updatedEncoding, file.ContentEncoding)
+	}
+
+	if len(file.MetaData) != 1 {
+		t.Fatalf("Expected 1 item but got %d", len(file.MetaData))
+	}
+	if file.MetaData["bingo"] != "bango" {
+		t.Fatalf("Expected `bingo` to be `bango` but got %q", file.MetaData["bingo"])
+	}
+
+	t.Logf("[DEBUG] Setting MetaData..")
+	metaData := map[string]string{
+		"hello": "there",
+	}
+	if _, err := filesClient.SetMetaData(ctx, shareName, "", fileName, SetMetaDataInput{MetaData: metaData}); err != nil {
+		t.Fatalf("Error setting MetaData: %s", err)
+	}
+
+	t.Logf("[DEBUG] Retrieving MetaData..")
+	retrievedMetaData, err := filesClient.GetMetaData(ctx, shareName, "", fileName)
+	if err != nil {
+		t.Fatalf("Error retrieving MetaData: %s", err)
+	}
+	if len(retrievedMetaData.MetaData) != 1 {
+		t.Fatalf("Expected 1 item but got %d", len(retrievedMetaData.MetaData))
+	}
+	if retrievedMetaData.MetaData["hello"] != "there" {
+		t.Fatalf("Expected `hello` to be `there` but got %q", 
retrievedMetaData.MetaData["hello"]) + } + + t.Logf("[DEBUG] Re-Setting MetaData..") + metaData = map[string]string{ + "hello": "there", + "second": "thing", + } + if _, err := filesClient.SetMetaData(ctx, shareName, "", fileName, SetMetaDataInput{MetaData: metaData}); err != nil { + t.Fatalf("Error setting MetaData: %s", err) + } + + t.Logf("[DEBUG] Re-Retrieving MetaData..") + retrievedMetaData, err = filesClient.GetMetaData(ctx, shareName, "", fileName) + if err != nil { + t.Fatalf("Error retrieving MetaData: %s", err) + } + if len(retrievedMetaData.MetaData) != 2 { + t.Fatalf("Expected 2 items but got %d", len(retrievedMetaData.MetaData)) + } + if retrievedMetaData.MetaData["hello"] != "there" { + t.Fatalf("Expected `hello` to be `there` but got %q", retrievedMetaData.MetaData["hello"]) + } + if retrievedMetaData.MetaData["second"] != "thing" { + t.Fatalf("Expected `second` to be `thing` but got %q", retrievedMetaData.MetaData["second"]) + } + + t.Logf("[DEBUG] Deleting Top Level File..") + if _, err := filesClient.Delete(ctx, shareName, "", fileName); err != nil { + t.Fatalf("Error deleting Top-Level File: %s", err) + } +} diff --git a/storage/2023-11-03/file/files/metadata_get.go b/storage/2023-11-03/file/files/metadata_get.go new file mode 100644 index 0000000..3e0fbad --- /dev/null +++ b/storage/2023-11-03/file/files/metadata_get.go @@ -0,0 +1,83 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type GetMetaDataResponse struct { + HttpResponse *client.Response + + MetaData map[string]string +} + +// GetMetaData returns the MetaData for the specified File. 
+func (c Client) GetMetaData(ctx context.Context, shareName, path, fileName string) (resp GetMetaDataResponse, err error) { + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if fileName == "" { + return resp, fmt.Errorf("`fileName` cannot be an empty string") + } + + if path != "" { + path = fmt.Sprintf("%s/", path) + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: GetMetadataOptions{}, + Path: fmt.Sprintf("/%s/%s%s", shareName, path, fileName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Header != nil { + resp.MetaData = metadata.ParseFromHeaders(resp.HttpResponse.Header) + } + } + + return +} + +type GetMetadataOptions struct{} + +func (m GetMetadataOptions) ToHeaders() *client.Headers { + return nil +} + +func (m GetMetadataOptions) ToOData() *odata.Query { + return nil +} + +func (m GetMetadataOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "metadata") + return out +} diff --git a/storage/2023-11-03/file/files/metadata_set.go b/storage/2023-11-03/file/files/metadata_set.go new file mode 100644 index 0000000..4087ef5 --- /dev/null +++ b/storage/2023-11-03/file/files/metadata_set.go @@ -0,0 +1,92 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type 
SetMetaDataResponse struct { + HttpResponse *client.Response +} + +type SetMetaDataInput struct { + MetaData map[string]string +} + +// SetMetaData updates the specified File to have the specified MetaData. +func (c Client) SetMetaData(ctx context.Context, shareName, path, fileName string, input SetMetaDataInput) (resp SetMetaDataResponse, err error) { + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if fileName == "" { + return resp, fmt.Errorf("`fileName` cannot be an empty string") + } + + if err = metadata.Validate(input.MetaData); err != nil { + return resp, fmt.Errorf("`input.MetaData` is not valid: %s", err) + } + + if path != "" { + path = fmt.Sprintf("/%s/", path) + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPut, + OptionsObject: SetMetaDataOptions{ + metaData: input.MetaData, + }, + Path: fmt.Sprintf("%s/%s%s", shareName, path, fileName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type SetMetaDataOptions struct { + metaData map[string]string +} + +func (s SetMetaDataOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + + if len(s.metaData) > 0 { + headers.Merge(metadata.SetMetaDataHeaders(s.metaData)) + } + return headers +} + +func (s SetMetaDataOptions) ToOData() *odata.Query { + return nil +} + +func (s SetMetaDataOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "metadata") + return out +} diff --git a/storage/2023-11-03/file/files/properties_get.go 
b/storage/2023-11-03/file/files/properties_get.go
new file mode 100644
index 0000000..6a21f77
--- /dev/null
+++ b/storage/2023-11-03/file/files/properties_get.go
@@ -0,0 +1,105 @@
+package files
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/go-azure-sdk/sdk/client"
+	"github.com/tombuildsstuff/giovanni/storage/internal/metadata"
+)
+
+type GetResponse struct {
+	HttpResponse *client.Response
+
+	CacheControl          string
+	ContentDisposition    string
+	ContentEncoding       string
+	ContentLanguage       string
+	ContentLength         *int64
+	ContentMD5            string
+	ContentType           string
+	CopyID                string
+	CopyStatus            string
+	CopySource            string
+	CopyProgress          string
+	CopyStatusDescription string
+	CopyCompletionTime    string
+	Encrypted             bool
+
+	MetaData map[string]string
+}
+
+// GetProperties returns the Properties for the specified file
+func (c Client) GetProperties(ctx context.Context, shareName, path, fileName string) (resp GetResponse, err error) {
+	if shareName == "" {
+		return resp, fmt.Errorf("`shareName` cannot be an empty string")
+	}
+
+	if strings.ToLower(shareName) != shareName {
+		return resp, fmt.Errorf("`shareName` must be a lower-cased string")
+	}
+
+	if fileName == "" {
+		return resp, fmt.Errorf("`fileName` cannot be an empty string")
+	}
+
+	if path != "" {
+		path = fmt.Sprintf("%s/", path) // trailing slash only - the leading slash comes from the Path format below
+	}
+
+	opts := client.RequestOptions{
+		ContentType: "application/xml; charset=utf-8",
+		ExpectedStatusCodes: []int{
+			http.StatusOK,
+		},
+		HttpMethod:    http.MethodHead,
+		OptionsObject: nil,
+		Path:          fmt.Sprintf("/%s/%s%s", shareName, path, fileName),
+	}
+
+	req, err := c.Client.NewRequest(ctx, opts)
+	if err != nil {
+		err = fmt.Errorf("building request: %+v", err)
+		return
+	}
+
+	resp.HttpResponse, err = req.Execute(ctx)
+	if err != nil {
+		err = fmt.Errorf("executing request: %+v", err)
+		return
+	}
+
+	if resp.HttpResponse != nil {
+		if resp.HttpResponse.Header != nil {
+			resp.CacheControl = resp.HttpResponse.Header.Get("Cache-Control")
+ resp.ContentDisposition = resp.HttpResponse.Header.Get("Content-Disposition") + resp.ContentEncoding = resp.HttpResponse.Header.Get("Content-Encoding") + resp.ContentLanguage = resp.HttpResponse.Header.Get("Content-Language") + resp.ContentMD5 = resp.HttpResponse.Header.Get("Content-MD5") + resp.ContentType = resp.HttpResponse.Header.Get("Content-Type") + resp.CopyID = resp.HttpResponse.Header.Get("x-ms-copy-id") + resp.CopyProgress = resp.HttpResponse.Header.Get("x-ms-copy-progress") + resp.CopySource = resp.HttpResponse.Header.Get("x-ms-copy-source") + resp.CopyStatus = resp.HttpResponse.Header.Get("x-ms-copy-status") + resp.CopyStatusDescription = resp.HttpResponse.Header.Get("x-ms-copy-status-description") + resp.CopyCompletionTime = resp.HttpResponse.Header.Get("x-ms-copy-completion-time") + resp.Encrypted = strings.EqualFold(resp.HttpResponse.Header.Get("x-ms-server-encrypted"), "true") + resp.MetaData = metadata.ParseFromHeaders(resp.HttpResponse.Header) + + contentLengthRaw := resp.HttpResponse.Header.Get("Content-Length") + if contentLengthRaw != "" { + contentLength, err := strconv.Atoi(contentLengthRaw) + if err != nil { + return resp, fmt.Errorf("error parsing %q for Content-Length as an integer: %s", contentLengthRaw, err) + } + contentLengthI64 := int64(contentLength) + resp.ContentLength = &contentLengthI64 + } + } + } + + return +} diff --git a/storage/2023-11-03/file/files/properties_set.go b/storage/2023-11-03/file/files/properties_set.go new file mode 100644 index 0000000..0e2c547 --- /dev/null +++ b/storage/2023-11-03/file/files/properties_set.go @@ -0,0 +1,173 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type SetPropertiesInput struct { + // Resizes a file to the specified size. 
+ // If the specified byte value is less than the current size of the file, + // then all ranges above the specified byte value are cleared. + ContentLength int64 + + // Modifies the cache control string for the file. + // If this property is not specified on the request, then the property will be cleared for the file. + // Subsequent calls to Get File Properties will not return this property, + // unless it is explicitly set on the file again. + ContentControl *string + + // Sets the file’s Content-Disposition header. + // If this property is not specified on the request, then the property will be cleared for the file. + // Subsequent calls to Get File Properties will not return this property, + // unless it is explicitly set on the file again. + ContentDisposition *string + + // Sets the file's content encoding. + // If this property is not specified on the request, then the property will be cleared for the file. + // Subsequent calls to Get File Properties will not return this property, + // unless it is explicitly set on the file again. + ContentEncoding *string + + // Sets the file's content language. + // If this property is not specified on the request, then the property will be cleared for the file. + // Subsequent calls to Get File Properties will not return this property, + // unless it is explicitly set on the file again. + ContentLanguage *string + + // Sets the file's MD5 hash. + // If this property is not specified on the request, then the property will be cleared for the file. + // Subsequent calls to Get File Properties will not return this property, + // unless it is explicitly set on the file again. + ContentMD5 *string + + // Sets the file's content type. + // If this property is not specified on the request, then the property will be cleared for the file. + // Subsequent calls to Get File Properties will not return this property, + // unless it is explicitly set on the file again. 
+ ContentType *string + + // The time at which this file was created at - if omitted, this'll be set to "now" + // This maps to the `x-ms-file-creation-time` field. + CreatedAt *time.Time + + // The time at which this file was last modified - if omitted, this'll be set to "now" + // This maps to the `x-ms-file-last-write-time` field. + LastModified *time.Time + + // MetaData is a mapping of key value pairs which should be assigned to this file + MetaData map[string]string +} + +type SetPropertiesResponse struct { + HttpResponse *client.Response +} + +// SetProperties sets the specified properties on the specified File +func (c Client) SetProperties(ctx context.Context, shareName, path, fileName string, input SetPropertiesInput) (resp SetPropertiesResponse, err error) { + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if fileName == "" { + return resp, fmt.Errorf("`fileName` cannot be an empty string") + } + + if path != "" { + path = fmt.Sprintf("/%s/", path) + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: SetPropertiesOptions{ + input: input, + }, + Path: fmt.Sprintf("%s/%s%s", shareName, path, fileName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type SetPropertiesOptions struct { + input SetPropertiesInput +} + +func (s SetPropertiesOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + var coalesceDate = func(input *time.Time, defaultVal string) string { + if input == nil { + return defaultVal + } + + 
return input.Format(time.RFC1123) + } + + headers.Append("x-ms-type", "file") + + headers.Append("x-ms-content-length", strconv.Itoa(int(s.input.ContentLength))) + headers.Append("x-ms-file-permission", "inherit") // TODO: expose this in future + headers.Append("x-ms-file-attributes", "None") // TODO: expose this in future + headers.Append("x-ms-file-creation-time", coalesceDate(s.input.CreatedAt, "now")) + headers.Append("x-ms-file-last-write-time", coalesceDate(s.input.LastModified, "now")) + + if s.input.ContentControl != nil { + headers.Append("x-ms-cache-control", *s.input.ContentControl) + } + if s.input.ContentDisposition != nil { + headers.Append("x-ms-content-disposition", *s.input.ContentDisposition) + } + if s.input.ContentEncoding != nil { + headers.Append("x-ms-content-encoding", *s.input.ContentEncoding) + } + if s.input.ContentLanguage != nil { + headers.Append("x-ms-content-language", *s.input.ContentLanguage) + } + if s.input.ContentMD5 != nil { + headers.Append("x-ms-content-md5", *s.input.ContentMD5) + } + if s.input.ContentType != nil { + headers.Append("x-ms-content-type", *s.input.ContentType) + } + + if len(s.input.MetaData) > 0 { + headers.Merge(metadata.SetMetaDataHeaders(s.input.MetaData)) + } + + return headers +} + +func (s SetPropertiesOptions) ToOData() *odata.Query { + return nil +} + +func (s SetPropertiesOptions) ToQuery() *client.QueryParams { + return nil +} diff --git a/storage/2023-11-03/file/files/range_clear.go b/storage/2023-11-03/file/files/range_clear.go new file mode 100644 index 0000000..c2391a1 --- /dev/null +++ b/storage/2023-11-03/file/files/range_clear.go @@ -0,0 +1,95 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type ClearByteRangeInput struct { + StartBytes int64 + EndBytes int64 +} + +type ClearByteRangeResponse struct { + HttpResponse *client.Response +} + +// ClearByteRange clears 
the specified Byte Range from within the specified File +func (c Client) ClearByteRange(ctx context.Context, shareName, path, fileName string, input ClearByteRangeInput) (resp ClearByteRangeResponse, err error) { + + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if fileName == "" { + return resp, fmt.Errorf("`fileName` cannot be an empty string") + } + + if input.StartBytes < 0 { + return resp, fmt.Errorf("`input.StartBytes` must be greater or equal to 0") + } + + if input.EndBytes <= 0 { + return resp, fmt.Errorf("`input.EndBytes` must be greater than 0") + } + + if path != "" { + path = fmt.Sprintf("%s/", path) + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: ClearByteRangeOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s%s", shareName, path, fileName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type ClearByteRangeOptions struct { + input ClearByteRangeInput +} + +func (c ClearByteRangeOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("x-ms-write", "clear") + headers.Append("x-ms-range", fmt.Sprintf("bytes=%d-%d", c.input.StartBytes, c.input.EndBytes)) + return headers +} + +func (c ClearByteRangeOptions) ToOData() *odata.Query { + return nil +} + +func (c ClearByteRangeOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "range") + return out +} diff --git a/storage/2023-11-03/file/files/range_get.go 
b/storage/2023-11-03/file/files/range_get.go new file mode 100644 index 0000000..90a1089 --- /dev/null +++ b/storage/2023-11-03/file/files/range_get.go @@ -0,0 +1,111 @@ +package files + +import ( + "context" + "fmt" + "io" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type GetByteRangeInput struct { + StartBytes int64 + EndBytes int64 +} + +type GetByteRangeResponse struct { + HttpResponse *client.Response + + Contents []byte +} + +// GetByteRange returns the specified Byte Range from the specified File. +func (c Client) GetByteRange(ctx context.Context, shareName, path, fileName string, input GetByteRangeInput) (resp GetByteRangeResponse, err error) { + + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if fileName == "" { + return resp, fmt.Errorf("`fileName` cannot be an empty string") + } + + if input.StartBytes < 0 { + return resp, fmt.Errorf("`input.StartBytes` must be greater or equal to 0") + } + + if input.EndBytes <= 0 { + return resp, fmt.Errorf("`input.EndBytes` must be greater than 0") + } + + expectedBytes := input.EndBytes - input.StartBytes + if expectedBytes < (4 * 1024) { + return resp, fmt.Errorf("requested Byte Range must be at least 4KB") + } + if expectedBytes > (4 * 1024 * 1024) { + return resp, fmt.Errorf("requested Byte Range must be at most 4MB") + } + + if path != "" { + path = fmt.Sprintf("%s/", path) + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusOK, + http.StatusPartialContent, + }, + HttpMethod: http.MethodGet, + OptionsObject: GetByteRangeOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/%s%s", shareName, path, fileName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + 
return
+	}
+
+	resp.HttpResponse, err = req.Execute(ctx)
+	if err != nil {
+		err = fmt.Errorf("executing request: %+v", err)
+		return
+	}
+
+	if resp.HttpResponse != nil {
+		bytes, err := io.ReadAll(resp.HttpResponse.Body)
+		if err != nil {
+			return resp, fmt.Errorf("reading response body: %v", err)
+		}
+		resp.Contents = bytes
+	}
+
+	return
+}
+
+type GetByteRangeOptions struct {
+	input GetByteRangeInput
+}
+
+func (g GetByteRangeOptions) ToHeaders() *client.Headers {
+	headers := &client.Headers{}
+	headers.Append("x-ms-range", fmt.Sprintf("bytes=%d-%d", g.input.StartBytes, g.input.EndBytes-1))
+	return headers
+}
+
+func (g GetByteRangeOptions) ToOData() *odata.Query {
+	return nil
+}
+
+func (g GetByteRangeOptions) ToQuery() *client.QueryParams {
+	return nil
+}
diff --git a/storage/2023-11-03/file/files/range_get_file.go b/storage/2023-11-03/file/files/range_get_file.go
new file mode 100644
index 0000000..f818526
--- /dev/null
+++ b/storage/2023-11-03/file/files/range_get_file.go
@@ -0,0 +1,139 @@
+package files
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"math"
+	"runtime"
+	"sync"
+
+	"github.com/hashicorp/go-azure-sdk/sdk/client"
+)
+
+type GetFileInput struct {
+	Parallelism int
+}
+
+type GetFileResponse struct {
+	HttpResponse *client.Response
+
+	OutputBytes []byte
+}
+
+// GetFile is a helper method to download a file by chunking it automatically
+func (c Client) GetFile(ctx context.Context, shareName, path, fileName string, input GetFileInput) (resp GetFileResponse, err error) {
+
+	// first look up the file and check out how many bytes it is
+	file, e := c.GetProperties(ctx, shareName, path, fileName)
+	if e != nil { // check the lookup error `e` - `err` is still nil at this point
+		resp.HttpResponse = file.HttpResponse
+		err = e
+		return
+	}
+
+	if file.ContentLength == nil {
+		err = fmt.Errorf("Content-Length was nil")
+		return
+	}
+
+	resp.HttpResponse = file.HttpResponse
+	length := *file.ContentLength
+	chunkSize := int64(4 * 1024 * 1024) // 4MB
+
+	if chunkSize > length {
+		chunkSize = length
+	}
+
+	// 
then split that up into chunks and retrieve it into the 'results' set
+	chunks := int(math.Ceil(float64(length) / float64(chunkSize)))
+	workerCount := input.Parallelism * runtime.NumCPU()
+	if workerCount > chunks {
+		workerCount = chunks
+	}
+
+	var waitGroup sync.WaitGroup
+	waitGroup.Add(chunks) // one Done per chunk goroutine below - Add(workerCount) would panic when workerCount < chunks
+
+	results := make([]*downloadFileChunkResult, chunks)
+	errors := make(chan error, chunks) // one slot per chunk - buffering by chunkSize would allocate millions of slots
+
+	for i := 0; i < chunks; i++ {
+		go func(i int) {
+			log.Printf("[DEBUG] Downloading Chunk %d of %d", i+1, chunks)
+
+			dfci := downloadFileChunkInput{
+				thisChunk: i,
+				chunkSize: chunkSize,
+				fileSize:  length,
+			}
+
+			result, err := c.downloadFileChunk(ctx, shareName, path, fileName, dfci)
+			if err != nil {
+				errors <- err
+				waitGroup.Done()
+				return
+			}
+
+			// if there's no error, we should have bytes, so this is safe
+			results[i] = result
+
+			waitGroup.Done()
+		}(i)
+	}
+	waitGroup.Wait()
+
+	// TODO: we should switch to hashicorp/multi-error here
+	if len(errors) > 0 {
+		err = fmt.Errorf("Error downloading file: %s", <-errors)
+		return
+	}
+
+	// then finally put it all together, in order and return it
+	output := make([]byte, length)
+	for _, v := range results {
+		copy(output[v.startBytes:v.endBytes], v.bytes)
+	}
+
+	resp.OutputBytes = output
+	return
+}
+
+type downloadFileChunkInput struct {
+	thisChunk int
+	chunkSize int64
+	fileSize  int64
+}
+
+type downloadFileChunkResult struct {
+	startBytes int64
+	endBytes   int64
+	bytes      []byte
+}
+
+func (c Client) downloadFileChunk(ctx context.Context, shareName, path, fileName string, input downloadFileChunkInput) (*downloadFileChunkResult, error) {
+	startBytes := input.chunkSize * int64(input.thisChunk)
+	endBytes := startBytes + input.chunkSize
+
+	// the last chunk may exceed the size of the file
+	remaining := input.fileSize - startBytes
+	if input.chunkSize > remaining {
+		endBytes = startBytes + remaining
+	}
+
+	getInput := GetByteRangeInput{
+		StartBytes: startBytes,
+		EndBytes:   endBytes,
+	}
+	result, err 
:= c.GetByteRange(ctx, shareName, path, fileName, getInput) + if err != nil { + return nil, fmt.Errorf("error getting bytes: %s", err) + } + + output := downloadFileChunkResult{ + startBytes: startBytes, + endBytes: endBytes, + bytes: result.Contents, + } + return &output, nil +} diff --git a/storage/2023-11-03/file/files/range_get_file_test.go b/storage/2023-11-03/file/files/range_get_file_test.go new file mode 100644 index 0000000..e7eb958 --- /dev/null +++ b/storage/2023-11-03/file/files/range_get_file_test.go @@ -0,0 +1,119 @@ +package files + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/file/shares" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +func TestGetSmallFile(t *testing.T) { + // the purpose of this test is to verify that the small, single-chunked file gets downloaded correctly + testGetFile(t, "small-file.png", "image/png") +} + +func TestGetLargeFile(t *testing.T) { + // the purpose of this test is to verify that the large, multi-chunked file gets downloaded correctly + testGetFile(t, "blank-large-file.dmg", "application/x-apple-diskimage") +} + +func testGetFile(t *testing.T, fileName string, contentType string) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + shareName := fmt.Sprintf("share-%d", testhelpers.RandomInt()) + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, 
ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + sharesClient, err := shares.NewWithBaseUri(fmt.Sprintf("https://%s.file.%s", accountName, *domainSuffix)) + if err := client.PrepareWithSharedKeyAuth(sharesClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + input := shares.CreateInput{ + QuotaInGB: 10, + } + _, err = sharesClient.Create(ctx, shareName, input) + if err != nil { + t.Fatalf("Error creating fileshare: %s", err) + } + defer sharesClient.Delete(ctx, shareName, shares.DeleteInput{DeleteSnapshots: false}) + + filesClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.file.%s", accountName, *domainSuffix)) + if err := client.PrepareWithSharedKeyAuth(filesClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + // store files outside of this directory, since they're reused + file, err := os.Open("../../../testdata/" + fileName) + if err != nil { + t.Fatalf("Error opening: %s", err) + } + + info, err := file.Stat() + if err != nil { + t.Fatalf("Error 'stat'-ing: %s", err) + } + + t.Logf("[DEBUG] Creating Top Level File..") + createFileInput := CreateInput{ + ContentLength: info.Size(), + ContentType: &contentType, + } + if _, err := filesClient.Create(ctx, shareName, "", fileName, createFileInput); err != nil { + t.Fatalf("Error creating Top-Level File: %s", err) + } + + t.Logf("[DEBUG] Uploading File..") + if err := filesClient.PutFile(ctx, shareName, "", fileName, file, 4); err != nil { + t.Fatalf("Error uploading File: %s", err) + } + + t.Logf("[DEBUG] Downloading file..") + resp, err := filesClient.GetFile(ctx, shareName, "", fileName, GetFileInput{Parallelism: 4}) + if err != nil { + t.Fatalf("Error downloading file: %s", err) + } + + t.Logf("[DEBUG] Asserting the files are the same size..") + expectedBytes := make([]byte, info.Size()) + 
file.Read(expectedBytes) + if len(expectedBytes) != len(resp.OutputBytes) { + t.Fatalf("Expected %d bytes but got %d", len(expectedBytes), len(resp.OutputBytes)) + } + + t.Logf("[DEBUG] Asserting the files are the same content-wise..") + // overkill, but it's this or shasum-ing + for i := int64(0); i < info.Size(); i++ { + if expectedBytes[i] != resp.OutputBytes[i] { + t.Fatalf("Expected byte %d to be %q but got %q", i, expectedBytes[i], resp.OutputBytes[i]) + } + } + + t.Logf("[DEBUG] Deleting Top Level File..") + if _, err := filesClient.Delete(ctx, shareName, "", fileName); err != nil { + t.Fatalf("Error deleting Top-Level File: %s", err) + } + +} diff --git a/storage/2023-11-03/file/files/range_put.go b/storage/2023-11-03/file/files/range_put.go new file mode 100644 index 0000000..7588f8b --- /dev/null +++ b/storage/2023-11-03/file/files/range_put.go @@ -0,0 +1,114 @@ +package files + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type PutByteRangeInput struct { + StartBytes int64 + EndBytes int64 + + // Content is the File Contents for the specified range + // which can be at most 4MB + Content []byte +} + +type PutRangeResponse struct { + HttpResponse *client.Response +} + +// PutByteRange puts the specified Byte Range in the specified File. 
+func (c Client) PutByteRange(ctx context.Context, shareName, path, fileName string, input PutByteRangeInput) (resp PutRangeResponse, err error) { + + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if fileName == "" { + return resp, fmt.Errorf("`fileName` cannot be an empty string") + } + + if input.StartBytes < 0 { + return resp, fmt.Errorf("`input.StartBytes` must be greater or equal to 0") + } + if input.EndBytes <= 0 { + return resp, fmt.Errorf("`input.EndBytes` must be greater than 0") + } + + expectedBytes := input.EndBytes - input.StartBytes + actualBytes := len(input.Content) + if expectedBytes != int64(actualBytes) { + return resp, fmt.Errorf(fmt.Sprintf("The specified byte-range (%d) didn't match the content size (%d).", expectedBytes, actualBytes)) + } + + if expectedBytes > (4 * 1024 * 1024) { + return resp, fmt.Errorf("specified Byte Range must be at most 4MB") + } + + if path != "" { + path = fmt.Sprintf("/%s/", path) + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: PutRangeOptions{ + input: input, + }, + Path: fmt.Sprintf("%s/%s%s", shareName, path, fileName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + req.Body = io.NopCloser(bytes.NewReader(input.Content)) + req.ContentLength = int64(len(input.Content)) + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type PutRangeOptions struct { + input PutByteRangeInput +} + +func (p PutRangeOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("x-ms-write", "update") + headers.Append("x-ms-range", fmt.Sprintf("bytes=%d-%d", 
p.input.StartBytes, p.input.EndBytes-1)) + headers.Append("Content-Length", strconv.Itoa(len(p.input.Content))) + return headers +} + +func (p PutRangeOptions) ToOData() *odata.Query { + return nil +} + +func (p PutRangeOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "range") + return out +} diff --git a/storage/2023-11-03/file/files/range_put_file.go b/storage/2023-11-03/file/files/range_put_file.go new file mode 100644 index 0000000..bc89370 --- /dev/null +++ b/storage/2023-11-03/file/files/range_put_file.go @@ -0,0 +1,109 @@ +package files + +import ( + "context" + "fmt" + "io" + "log" + "math" + "os" + "sync" +) + +// PutFile is a helper method which takes a file, and automatically chunks it up, rather than having to do this yourself +func (c Client) PutFile(ctx context.Context, shareName, path, fileName string, file *os.File, parallelism int) error { + fileInfo, err := file.Stat() + if err != nil { + return fmt.Errorf("error loading file info: %s", err) + } + + fileSize := fileInfo.Size() + chunkSize := 4 * 1024 * 1024 // 4MB + if chunkSize > int(fileSize) { + chunkSize = int(fileSize) + } + chunks := int(math.Ceil(float64(fileSize) / float64(chunkSize*1.0))) + + workerCount := parallelism + if workerCount > chunks { + workerCount = chunks + } + + var waitGroup sync.WaitGroup + waitGroup.Add(workerCount) + + jobs := make(chan int, workerCount) + errors := make(chan error, chunks) + + for i := 0; i < workerCount; i++ { + go func() { + for i := range jobs { + log.Printf("[DEBUG] Chunk %d of %d", i+1, chunks) + + uci := uploadChunkInput{ + thisChunk: i, + chunkSize: chunkSize, + fileSize: fileSize, + } + + _, err := c.uploadChunk(ctx, shareName, path, fileName, uci, file) + if err != nil { + errors <- err + } + } + waitGroup.Done() + }() + } + + for i := 0; i < chunks; i++ { + jobs <- i + } + close(jobs) + waitGroup.Wait() + + // TODO: we should switch to hashicorp/multi-error here + if len(errors) > 0 { + return 
fmt.Errorf("Error uploading file: %s", <-errors) + } + + return nil +} + +type uploadChunkInput struct { + thisChunk int + chunkSize int + fileSize int64 +} + +func (c Client) uploadChunk(ctx context.Context, shareName, path, fileName string, input uploadChunkInput, file *os.File) (result PutRangeResponse, err error) { + startBytes := int64(input.chunkSize * input.thisChunk) + endBytes := startBytes + int64(input.chunkSize) + + // the last size may exceed the size of the file + remaining := input.fileSize - startBytes + if int64(input.chunkSize) > remaining { + endBytes = startBytes + remaining + } + + bytesToRead := int(endBytes) - int(startBytes) + bytes := make([]byte, bytesToRead) + + _, err = file.ReadAt(bytes, startBytes) + if err != nil { + if err != io.EOF { + return result, fmt.Errorf("Error reading bytes: %s", err) + } + } + + putBytesInput := PutByteRangeInput{ + StartBytes: startBytes, + EndBytes: endBytes, + Content: bytes, + } + result, err = c.PutByteRange(ctx, shareName, path, fileName, putBytesInput) + if err != nil { + return result, fmt.Errorf("error putting bytes: %s", err) + } + + return +} diff --git a/storage/2023-11-03/file/files/range_put_file_test.go b/storage/2023-11-03/file/files/range_put_file_test.go new file mode 100644 index 0000000..ec4a857 --- /dev/null +++ b/storage/2023-11-03/file/files/range_put_file_test.go @@ -0,0 +1,102 @@ +package files + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/file/shares" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +func TestPutSmallFile(t *testing.T) { + // the purpose of this test is to ensure that a small file (< 4MB) is a single chunk + testPutFile(t, "small-file.png", "image/png") +} + +func TestPutLargeFile(t *testing.T) { + // the purpose of this test is to 
ensure that large files (> 4MB) are chunked + testPutFile(t, "blank-large-file.dmg", "application/x-apple-diskimage") +} + +func TestPutVerySmallFile(t *testing.T) { + // the purpose of this test is to ensure that a very small file (< 4KB) is a single chunk + testPutFile(t, "very-small.json", "application/json") +} + +func testPutFile(t *testing.T, fileName string, contentType string) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + shareName := fmt.Sprintf("share-%d", testhelpers.RandomInt()) + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + sharesClient, err := shares.NewWithBaseUri(fmt.Sprintf("https://%s.file.%s", accountName, *domainSuffix)) + if err := client.PrepareWithSharedKeyAuth(sharesClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + input := shares.CreateInput{ + QuotaInGB: 10, + } + _, err = sharesClient.Create(ctx, shareName, input) + if err != nil { + t.Fatalf("Error creating fileshare: %s", err) + } + defer sharesClient.Delete(ctx, shareName, shares.DeleteInput{DeleteSnapshots: false}) + + filesClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.file.%s", accountName, *domainSuffix)) + if err := client.PrepareWithSharedKeyAuth(filesClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + // store files outside of this directory, since they're 
reused + file, err := os.Open("../../../testdata/" + fileName) + if err != nil { + t.Fatalf("Error opening: %s", err) + } + + info, err := file.Stat() + if err != nil { + t.Fatalf("Error 'stat'-ing: %s", err) + } + + t.Logf("[DEBUG] Creating Top Level File..") + createFileInput := CreateInput{ + ContentLength: info.Size(), + ContentType: &contentType, + } + if _, err := filesClient.Create(ctx, shareName, "", fileName, createFileInput); err != nil { + t.Fatalf("Error creating Top-Level File: %s", err) + } + + t.Logf("[DEBUG] Uploading File..") + if err := filesClient.PutFile(ctx, shareName, "", fileName, file, 4); err != nil { + t.Fatalf("Error uploading File: %s", err) + } + + t.Logf("[DEBUG] Deleting Top Level File..") + if _, err := filesClient.Delete(ctx, shareName, "", fileName); err != nil { + t.Fatalf("Error deleting Top-Level File: %s", err) + } +} diff --git a/storage/2023-11-03/file/files/ranges_list.go b/storage/2023-11-03/file/files/ranges_list.go new file mode 100644 index 0000000..9a67cab --- /dev/null +++ b/storage/2023-11-03/file/files/ranges_list.go @@ -0,0 +1,90 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type ListRangesResponse struct { + HttpResponse *client.Response + + Ranges []Range `xml:"Range"` +} + +type Range struct { + Start string `xml:"Start"` + End string `xml:"End"` +} + +// ListRanges returns the list of valid ranges for the specified File. 
+func (c Client) ListRanges(ctx context.Context, shareName, path, fileName string) (resp ListRangesResponse, err error) { + + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if path == "" { + return resp, fmt.Errorf("`path` cannot be an empty string") + } + + if fileName == "" { + return resp, fmt.Errorf("`fileName` cannot be an empty string") + } + + if path != "" { + path = fmt.Sprintf("%s/", path) + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: ListRangeOptions{}, + Path: fmt.Sprintf("/%s/%s%s", shareName, path, fileName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + resp.HttpResponse.Unmarshal(&resp) + } + + return +} + +type ListRangeOptions struct{} + +func (l ListRangeOptions) ToHeaders() *client.Headers { + return nil +} + +func (l ListRangeOptions) ToOData() *odata.Query { + return nil +} + +func (l ListRangeOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "rangelist") + return out +} diff --git a/storage/2023-11-03/file/files/version.go b/storage/2023-11-03/file/files/version.go new file mode 100644 index 0000000..1d4bc38 --- /dev/null +++ b/storage/2023-11-03/file/files/version.go @@ -0,0 +1,4 @@ +package files + +const apiVersion = "2023-11-03" +const componentName = "file/files" diff --git a/storage/2023-11-03/file/shares/README.md b/storage/2023-11-03/file/shares/README.md new file mode 100644 index 0000000..ecc0f13 --- /dev/null +++ 
b/storage/2023-11-03/file/shares/README.md @@ -0,0 +1,43 @@ +## File Storage Shares SDK for API version 2020-08-04 + +This package allows you to interact with the Shares File Storage API + +### Supported Authorizers + +* Azure Active Directory (for the Resource Endpoint `https://storage.azure.com`) +* SharedKeyLite (Blob, File & Queue) + +### Example Usage + +```go +package main + +import ( + "context" + "fmt" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/tombuildsstuff/giovanni/storage/2020-08-04/file/shares" +) + +func Example() error { + accountName := "storageaccount1" + storageAccountKey := "ABC123...." + shareName := "myshare" + + storageAuth := autorest.NewSharedKeyLiteAuthorizer(accountName, storageAccountKey) + sharesClient := shares.New() + sharesClient.Client.Authorizer = storageAuth + + ctx := context.TODO() + input := shares.CreateInput{ + QuotaInGB: 2, + } + if _, err := sharesClient.Create(ctx, accountName, shareName, input); err != nil { + return fmt.Errorf("Error creating Share: %s", err) + } + + return nil +} +``` \ No newline at end of file diff --git a/storage/2023-11-03/file/shares/acl_get.go b/storage/2023-11-03/file/shares/acl_get.go new file mode 100644 index 0000000..56aa641 --- /dev/null +++ b/storage/2023-11-03/file/shares/acl_get.go @@ -0,0 +1,76 @@ +package shares + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type GetACLResult struct { + HttpResponse *client.Response + + SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"` +} + +// GetACL get the Access Control List for the specified Storage Share +func (c Client) GetACL(ctx context.Context, shareName string) (resp GetACLResult, err error) { + + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased 
string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: getAclOptions{}, + Path: fmt.Sprintf("/%s", shareName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + err = resp.HttpResponse.Unmarshal(&resp) + if err != nil { + return resp, fmt.Errorf("unmarshalling response: %v", err) + } + } + return +} + +type getAclOptions struct { +} + +func (g getAclOptions) ToHeaders() *client.Headers { + return nil +} + +func (g getAclOptions) ToOData() *odata.Query { + return nil +} + +func (g getAclOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("restype", "share") + out.Append("comp", "acl") + return out +} diff --git a/storage/2023-11-03/file/shares/acl_set.go b/storage/2023-11-03/file/shares/acl_set.go new file mode 100644 index 0000000..33b7c84 --- /dev/null +++ b/storage/2023-11-03/file/shares/acl_set.go @@ -0,0 +1,89 @@ +package shares + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "io" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type SetAclResponse struct { + HttpResponse *client.Response +} + +type SetAclInput struct { + SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"` + + XMLName xml.Name `xml:"SignedIdentifiers"` +} + +// SetACL sets the specified Access Control List on the specified Storage Share +func (c Client) SetACL(ctx context.Context, shareName string, input SetAclInput) (resp SetAclResponse, err error) { + + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if 
strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPut, + OptionsObject: setAclOptions{}, + Path: fmt.Sprintf("/%s", shareName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + b, err := xml.Marshal(&input) + if err != nil { + return resp, fmt.Errorf("marshalling input: %v", err) + } + withHeader := xml.Header + string(b) + bytesWithHeader := []byte(withHeader) + req.ContentLength = int64(len(bytesWithHeader)) + req.Header.Set("Content-Length", strconv.Itoa(len(bytesWithHeader))) + req.Body = io.NopCloser(bytes.NewReader(bytesWithHeader)) + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + return +} + +type setAclOptions struct { + SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"` +} + +func (s setAclOptions) ToHeaders() *client.Headers { + return nil +} + +func (s setAclOptions) ToOData() *odata.Query { + return nil +} + +func (s setAclOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("restype", "share") + out.Append("comp", "acl") + return out +} diff --git a/storage/2023-11-03/file/shares/api.go b/storage/2023-11-03/file/shares/api.go new file mode 100644 index 0000000..553fb48 --- /dev/null +++ b/storage/2023-11-03/file/shares/api.go @@ -0,0 +1,21 @@ +package shares + +import ( + "context" +) + +type StorageShare interface { + SetACL(ctx context.Context, shareName string, input SetAclInput) (SetAclResponse, error) + GetSnapshot(ctx context.Context, shareName string, input GetSnapshotPropertiesInput) (GetSnapshotPropertiesResponse, error) + GetStats(ctx context.Context, shareName string) (GetStatsResponse, error) + GetACL(ctx 
context.Context, shareName string) (GetACLResult, error) + SetMetaData(ctx context.Context, shareName string, input SetMetaDataInput) (SetMetaDataResponse, error) + GetMetaData(ctx context.Context, shareName string) (GetMetaDataResponse, error) + SetProperties(ctx context.Context, shareName string, properties ShareProperties) (SetPropertiesResponse, error) + DeleteSnapshot(ctx context.Context, accountName string, shareName string, shareSnapshot string) (DeleteSnapshotResponse, error) + CreateSnapshot(ctx context.Context, shareName string, input CreateSnapshotInput) (CreateSnapshotResponse, error) + GetResourceManagerResourceID(subscriptionID, resourceGroup, accountName, shareName string) string + GetProperties(ctx context.Context, shareName string) (GetPropertiesResult, error) + Delete(ctx context.Context, shareName string, input DeleteInput) (DeleteResponse, error) + Create(ctx context.Context, shareName string, input CreateInput) (CreateResponse, error) +} diff --git a/storage/2023-11-03/file/shares/client.go b/storage/2023-11-03/file/shares/client.go new file mode 100644 index 0000000..6e5fd15 --- /dev/null +++ b/storage/2023-11-03/file/shares/client.go @@ -0,0 +1,22 @@ +package shares + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/dataplane/storage" +) + +// Client is the base client for File Storage Shares. 
+type Client struct { + Client *storage.BaseClient +} + +func NewWithBaseUri(baseUri string) (*Client, error) { + baseClient, err := storage.NewBaseClient(baseUri, componentName, apiVersion) + if err != nil { + return nil, fmt.Errorf("building base client: %+v", err) + } + return &Client{ + Client: baseClient, + }, nil +} diff --git a/storage/2023-11-03/file/shares/create.go b/storage/2023-11-03/file/shares/create.go new file mode 100644 index 0000000..b018460 --- /dev/null +++ b/storage/2023-11-03/file/shares/create.go @@ -0,0 +1,119 @@ +package shares + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type AccessTier string + +const ( + TransactionOptimizedAccessTier AccessTier = "TransactionOptimized" + HotAccessTier AccessTier = "Hot" + CoolAccessTier AccessTier = "Cool" + PremiumAccessTier AccessTier = "Premium" +) + +type CreateInput struct { + // Specifies the maximum size of the share, in gigabytes. + // Must be greater than 0, and less than or equal to 5TB (5120). + QuotaInGB int + + // Specifies the enabled protocols on the share. If not specified, the default is SMB. + EnabledProtocol ShareProtocol + + MetaData map[string]string + + // Specifies the access tier of the share. 
+ AccessTier *AccessTier +} + +type CreateResponse struct { + HttpResponse *client.Response +} + +// Create creates the specified Storage Share within the specified Storage Account +func (c Client) Create(ctx context.Context, shareName string, input CreateInput) (resp CreateResponse, err error) { + + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if input.QuotaInGB <= 0 || input.QuotaInGB > 102400 { + return resp, fmt.Errorf("`input.QuotaInGB` must be greater than 0, and less than/equal to 100TB (102400 GB)") + } + + if err = metadata.Validate(input.MetaData); err != nil { + return resp, fmt.Errorf("`input.MetaData` is not valid: %s", err) + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: CreateOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s", shareName), + } + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + return +} + +type CreateOptions struct { + input CreateInput +} + +func (c CreateOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + + if len(c.input.MetaData) > 0 { + headers.Merge(metadata.SetMetaDataHeaders(c.input.MetaData)) + } + + protocol := SMB + if c.input.EnabledProtocol != "" { + protocol = c.input.EnabledProtocol + } + headers.Append("x-ms-enabled-protocols", string(protocol)) + + if c.input.AccessTier != nil { + headers.Append("x-ms-access-tier", string(*c.input.AccessTier)) + } + + headers.Append("x-ms-share-quota", strconv.Itoa(c.input.QuotaInGB)) + + return headers +} + +func (c CreateOptions) ToOData() 
*odata.Query { + return nil +} + +func (c CreateOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("restype", "share") + return out +} diff --git a/storage/2023-11-03/file/shares/delete.go b/storage/2023-11-03/file/shares/delete.go new file mode 100644 index 0000000..7ae11b3 --- /dev/null +++ b/storage/2023-11-03/file/shares/delete.go @@ -0,0 +1,77 @@ +package shares + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type DeleteResponse struct { + HttpResponse *client.Response +} + +type DeleteInput struct { + DeleteSnapshots bool +} + +// Delete deletes the specified Storage Share from within a Storage Account +func (c Client) Delete(ctx context.Context, shareName string, input DeleteInput) (resp DeleteResponse, err error) { + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodDelete, + OptionsObject: DeleteOptions{ + deleteSnapshots: input.DeleteSnapshots, + }, + Path: fmt.Sprintf("/%s", shareName), + } + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type DeleteOptions struct { + deleteSnapshots bool +} + +func (d DeleteOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + if d.deleteSnapshots { + headers.Append("x-ms-delete-snapshots", "include") + } + return headers +} + +func (d DeleteOptions) ToOData() *odata.Query { + return nil +} + +func (d DeleteOptions) 
ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("restype", "share") + return out +} diff --git a/storage/2023-11-03/file/shares/lifecycle_test.go b/storage/2023-11-03/file/shares/lifecycle_test.go new file mode 100644 index 0000000..29b6d19 --- /dev/null +++ b/storage/2023-11-03/file/shares/lifecycle_test.go @@ -0,0 +1,387 @@ +package shares + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +var _ StorageShare = Client{} + +func TestSharesLifecycle(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + shareName := fmt.Sprintf("share-%d", testhelpers.RandomInt()) + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindStorageVTwo) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + sharesClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.file.%s", accountName, *domainSuffix)) + if err := client.PrepareWithSharedKeyAuth(sharesClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + tier := CoolAccessTier + input := CreateInput{ + QuotaInGB: 1, + AccessTier: &tier, + } + _, err = sharesClient.Create(ctx, shareName, input) + if err != nil { + t.Fatalf("Error creating fileshare: %s", err) + } + + snapshot, err := sharesClient.CreateSnapshot(ctx, 
shareName, CreateSnapshotInput{}) + if err != nil { + t.Fatalf("Error taking snapshot: %s", err) + } + t.Logf("Snapshot Date Time: %s", snapshot.SnapshotDateTime) + + snapshotDetails, err := sharesClient.GetSnapshot(ctx, shareName, GetSnapshotPropertiesInput{snapshotShare: snapshot.SnapshotDateTime}) + if err != nil { + t.Fatalf("Error retrieving snapshot: %s", err) + } + + t.Logf("MetaData: %s", snapshotDetails.MetaData) + + _, err = sharesClient.DeleteSnapshot(ctx, accountName, shareName, snapshot.SnapshotDateTime) + if err != nil { + t.Fatalf("Error deleting snapshot: %s", err) + } + + stats, err := sharesClient.GetStats(ctx, shareName) + if err != nil { + t.Fatalf("Error retrieving stats: %s", err) + } + + if stats.ShareUsageBytes != 0 { + t.Fatalf("Expected `stats.ShareUsageBytes` to be 0 but got: %d", stats.ShareUsageBytes) + } + + share, err := sharesClient.GetProperties(ctx, shareName) + if err != nil { + t.Fatalf("Error retrieving share: %s", err) + } + if share.QuotaInGB != 1 { + t.Fatalf("Expected Quota to be 1 but got: %d", share.QuotaInGB) + } + if share.EnabledProtocol != SMB { + t.Fatalf("Expected EnabledProtocol to SMB but got: %s", share.EnabledProtocol) + } + if share.AccessTier == nil || *share.AccessTier != CoolAccessTier { + t.Fatalf("Expected AccessTier to be Cool but got: %v", share.AccessTier) + } + + newTier := HotAccessTier + quota := 5 + props := ShareProperties{ + AccessTier: &newTier, + QuotaInGb: &quota, + } + _, err = sharesClient.SetProperties(ctx, shareName, props) + if err != nil { + t.Fatalf("Error updating quota: %s", err) + } + + share, err = sharesClient.GetProperties(ctx, shareName) + if err != nil { + t.Fatalf("Error retrieving share: %s", err) + } + if share.QuotaInGB != 5 { + t.Fatalf("Expected Quota to be 5 but got: %d", share.QuotaInGB) + } + + if share.AccessTier == nil || *share.AccessTier != HotAccessTier { + t.Fatalf("Expected AccessTier to be Hot but got: %v", share.AccessTier) + } + + updatedMetaData := 
map[string]string{ + "hello": "world", + } + + _, err = sharesClient.SetMetaData(ctx, shareName, SetMetaDataInput{MetaData: updatedMetaData}) + if err != nil { + t.Fatalf("Error setting metadata: %s", err) + } + + result, err := sharesClient.GetMetaData(ctx, shareName) + if err != nil { + t.Fatalf("Error retrieving metadata: %s", err) + } + + if result.MetaData["hello"] != "world" { + t.Fatalf("Expected metadata `hello` to be `world` but got: %q", result.MetaData["hello"]) + } + if len(result.MetaData) != 1 { + t.Fatalf("Expected metadata to be 1 item but got: %s", result.MetaData) + } + + acls, err := sharesClient.GetACL(ctx, shareName) + if err != nil { + t.Fatalf("Error retrieving ACL's: %s", err) + } + if len(acls.SignedIdentifiers) != 0 { + t.Fatalf("Expected 0 identifiers but got %d", len(acls.SignedIdentifiers)) + } + + updatedAcls := []SignedIdentifier{ + { + Id: "abc123", + AccessPolicy: AccessPolicy{ + Start: "2020-07-01T08:49:37.0000000Z", + Expiry: "2020-07-01T09:49:37.0000000Z", + Permission: "rwd", + }, + }, + { + Id: "bcd234", + AccessPolicy: AccessPolicy{ + Start: "2020-07-01T08:49:37.0000000Z", + Expiry: "2020-07-01T09:49:37.0000000Z", + Permission: "rwd", + }, + }, + } + _, err = sharesClient.SetACL(ctx, shareName, SetAclInput{SignedIdentifiers: updatedAcls}) + if err != nil { + t.Fatalf("Error setting ACL's: %s", err) + } + + acls, err = sharesClient.GetACL(ctx, shareName) + if err != nil { + t.Fatalf("Error retrieving ACL's: %s", err) + } + if len(acls.SignedIdentifiers) != 2 { + t.Fatalf("Expected 2 identifiers but got %d", len(acls.SignedIdentifiers)) + } + + _, err = sharesClient.Delete(ctx, shareName, DeleteInput{DeleteSnapshots: false}) + if err != nil { + t.Fatalf("Error deleting Share: %s", err) + } +} + +func TestSharesLifecycleLargeQuota(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + 
resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + shareName := fmt.Sprintf("share-%d", testhelpers.RandomInt()) + + testData, err := client.BuildTestResourcesWithSku(ctx, resourceGroup, accountName, storageaccounts.KindFileStorage, storageaccounts.SkuNamePremiumLRS) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + sharesClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.file.%s", accountName, *domainSuffix)) + if err := client.PrepareWithSharedKeyAuth(sharesClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + input := CreateInput{ + QuotaInGB: 1001, + } + _, err = sharesClient.Create(ctx, shareName, input) + if err != nil { + t.Fatalf("Error creating fileshare: %s", err) + } + + snapshot, err := sharesClient.CreateSnapshot(ctx, shareName, CreateSnapshotInput{}) + if err != nil { + t.Fatalf("Error taking snapshot: %s", err) + } + t.Logf("Snapshot Date Time: %s", snapshot.SnapshotDateTime) + + snapshotDetails, err := sharesClient.GetSnapshot(ctx, shareName, GetSnapshotPropertiesInput{snapshotShare: snapshot.SnapshotDateTime}) + if err != nil { + t.Fatalf("Error retrieving snapshot: %s", err) + } + + t.Logf("MetaData: %s", snapshotDetails.MetaData) + + _, err = sharesClient.DeleteSnapshot(ctx, accountName, shareName, snapshot.SnapshotDateTime) + if err != nil { + t.Fatalf("Error deleting snapshot: %s", err) + } + + stats, err := sharesClient.GetStats(ctx, shareName) + if err != nil { + t.Fatalf("Error retrieving stats: %s", err) + } + + if stats.ShareUsageBytes != 0 { + t.Fatalf("Expected `stats.ShareUsageBytes` to be 0 but got: %d", stats.ShareUsageBytes) + } + + share, err := 
sharesClient.GetProperties(ctx, shareName) + if err != nil { + t.Fatalf("Error retrieving share: %s", err) + } + if share.QuotaInGB != 1001 { + t.Fatalf("Expected Quota to be 1001 but got: %d", share.QuotaInGB) + } + + newQuota := 6000 + props := ShareProperties{ + QuotaInGb: &newQuota, + } + _, err = sharesClient.SetProperties(ctx, shareName, props) + if err != nil { + t.Fatalf("Error updating quota: %s", err) + } + + share, err = sharesClient.GetProperties(ctx, shareName) + if err != nil { + t.Fatalf("Error retrieving share: %s", err) + } + if share.QuotaInGB != 6000 { + t.Fatalf("Expected Quota to be 6000 but got: %d", share.QuotaInGB) + } + + updatedMetaData := map[string]string{ + "hello": "world", + } + + _, err = sharesClient.SetMetaData(ctx, shareName, SetMetaDataInput{MetaData: updatedMetaData}) + if err != nil { + t.Fatalf("Error setting metadata: %s", err) + } + + result, err := sharesClient.GetMetaData(ctx, shareName) + if err != nil { + t.Fatalf("Error retrieving metadata: %s", err) + } + + if result.MetaData["hello"] != "world" { + t.Fatalf("Expected metadata `hello` to be `world` but got: %q", result.MetaData["hello"]) + } + if len(result.MetaData) != 1 { + t.Fatalf("Expected metadata to be 1 item but got: %s", result.MetaData) + } + + acls, err := sharesClient.GetACL(ctx, shareName) + if err != nil { + t.Fatalf("Error retrieving ACL's: %s", err) + } + if len(acls.SignedIdentifiers) != 0 { + t.Fatalf("Expected 0 identifiers but got %d", len(acls.SignedIdentifiers)) + } + + updatedAcls := []SignedIdentifier{ + { + Id: "abc123", + AccessPolicy: AccessPolicy{ + Start: "2020-07-01T08:49:37.0000000Z", + Expiry: "2020-07-01T09:49:37.0000000Z", + Permission: "rwd", + }, + }, + { + Id: "bcd234", + AccessPolicy: AccessPolicy{ + Start: "2020-07-01T08:49:37.0000000Z", + Expiry: "2020-07-01T09:49:37.0000000Z", + Permission: "rwd", + }, + }, + } + _, err = sharesClient.SetACL(ctx, shareName, SetAclInput{SignedIdentifiers: updatedAcls}) + if err != nil { + 
t.Fatalf("Error setting ACL's: %s", err) + } + + acls, err = sharesClient.GetACL(ctx, shareName) + if err != nil { + t.Fatalf("Error retrieving ACL's: %s", err) + } + if len(acls.SignedIdentifiers) != 2 { + t.Fatalf("Expected 2 identifiers but got %d", len(acls.SignedIdentifiers)) + } + + _, err = sharesClient.Delete(ctx, shareName, DeleteInput{DeleteSnapshots: false}) + if err != nil { + t.Fatalf("Error deleting Share: %s", err) + } +} + +func TestSharesLifecycleNFSProtocol(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + shareName := fmt.Sprintf("share-%d", testhelpers.RandomInt()) + + testData, err := client.BuildTestResourcesWithSku(ctx, resourceGroup, accountName, storageaccounts.KindFileStorage, storageaccounts.SkuNamePremiumLRS) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + sharesClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.file.%s", accountName, *domainSuffix)) + if err := client.PrepareWithSharedKeyAuth(sharesClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + input := CreateInput{ + QuotaInGB: 1000, + EnabledProtocol: NFS, + } + _, err = sharesClient.Create(ctx, shareName, input) + if err != nil { + t.Fatalf("Error creating fileshare: %s", err) + } + + share, err := sharesClient.GetProperties(ctx, shareName) + if err != nil { + t.Fatalf("Error retrieving share: %s", err) + } + if share.EnabledProtocol != NFS { + t.Fatalf(`Expected enabled protocol to be "NFS" but got: %q`, 
share.EnabledProtocol) + } + + _, err = sharesClient.Delete(ctx, shareName, DeleteInput{DeleteSnapshots: false}) + if err != nil { + t.Fatalf("Error deleting Share: %s", err) + } +} diff --git a/storage/2023-11-03/file/shares/metadata_get.go b/storage/2023-11-03/file/shares/metadata_get.go new file mode 100644 index 0000000..e31762b --- /dev/null +++ b/storage/2023-11-03/file/shares/metadata_get.go @@ -0,0 +1,75 @@ +package shares + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type GetMetaDataResponse struct { + HttpResponse *client.Response + + MetaData map[string]string +} + +// GetMetaData returns the MetaData associated with the specified Storage Share +func (c Client) GetMetaData(ctx context.Context, shareName string) (resp GetMetaDataResponse, err error) { + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: GetMetaDataOptions{}, + Path: fmt.Sprintf("/%s", shareName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Header != nil { + resp.MetaData = metadata.ParseFromHeaders(resp.HttpResponse.Header) + } + } + + return +} + +type GetMetaDataOptions struct{} + +func (g GetMetaDataOptions) ToHeaders() *client.Headers { + return nil +} + +func (g GetMetaDataOptions) ToOData() 
*odata.Query { + return nil +} + +func (g GetMetaDataOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("restype", "share") + out.Append("comp", "metadata") + return out +} diff --git a/storage/2023-11-03/file/shares/metadata_set.go b/storage/2023-11-03/file/shares/metadata_set.go new file mode 100644 index 0000000..3a23d90 --- /dev/null +++ b/storage/2023-11-03/file/shares/metadata_set.go @@ -0,0 +1,82 @@ +package shares + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type SetMetaDataResponse struct { + HttpResponse *client.Response +} + +type SetMetaDataInput struct { + MetaData map[string]string +} + +// SetMetaData sets the MetaData on the specified Storage Share +func (c Client) SetMetaData(ctx context.Context, shareName string, input SetMetaDataInput) (resp SetMetaDataResponse, err error) { + + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if err := metadata.Validate(input.MetaData); err != nil { + return resp, fmt.Errorf("`metadata` is not valid: %v", err) + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPut, + OptionsObject: SetMetaDataOptions{ + metaData: input.MetaData, + }, + Path: fmt.Sprintf("/%s", shareName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type SetMetaDataOptions struct { + metaData map[string]string +} + +func (s 
SetMetaDataOptions) ToHeaders() *client.Headers { + headers := metadata.SetMetaDataHeaders(s.metaData) + return &headers +} + +func (s SetMetaDataOptions) ToOData() *odata.Query { + return nil +} + +func (s SetMetaDataOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("restype", "share") + out.Append("comp", "metadata") + return out +} diff --git a/storage/2023-11-03/file/shares/models.go b/storage/2023-11-03/file/shares/models.go new file mode 100644 index 0000000..3272716 --- /dev/null +++ b/storage/2023-11-03/file/shares/models.go @@ -0,0 +1,22 @@ +package shares + +type SignedIdentifier struct { + Id string `xml:"Id"` + AccessPolicy AccessPolicy `xml:"AccessPolicy"` +} + +type AccessPolicy struct { + Start string `xml:"Start"` + Expiry string `xml:"Expiry"` + Permission string `xml:"Permission"` +} + +type ShareProtocol string + +const ( + // SMB indicates the share can be accessed by SMBv3.0, SMBv2.1 and REST. + SMB ShareProtocol = "SMB" + + // NFS indicates the share can be accessed by NFSv4.1. A premium account is required for this option. 
+ NFS ShareProtocol = "NFS" +) diff --git a/storage/2023-11-03/file/shares/options.go b/storage/2023-11-03/file/shares/options.go new file mode 100644 index 0000000..e2c1d5c --- /dev/null +++ b/storage/2023-11-03/file/shares/options.go @@ -0,0 +1,24 @@ +package shares + +import ( + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +var _ client.Options = sharesOptions{} + +type sharesOptions struct{} + +func (o sharesOptions) ToHeaders() *client.Headers { + return &client.Headers{} +} + +func (sharesOptions) ToOData() *odata.Query { + return nil +} + +func (sharesOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("restype", "share") + return out +} diff --git a/storage/2023-11-03/file/shares/properties_get.go b/storage/2023-11-03/file/shares/properties_get.go new file mode 100644 index 0000000..763584e --- /dev/null +++ b/storage/2023-11-03/file/shares/properties_get.go @@ -0,0 +1,82 @@ +package shares + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type GetPropertiesResult struct { + HttpResponse *client.Response + + MetaData map[string]string + QuotaInGB int + EnabledProtocol ShareProtocol + AccessTier *AccessTier +} + +// GetProperties returns the properties about the specified Storage Share +func (c Client) GetProperties(ctx context.Context, shareName string) (resp GetPropertiesResult, err error) { + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: sharesOptions{}, + Path: fmt.Sprintf("/%s", 
shareName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Header != nil { + resp.MetaData = metadata.ParseFromHeaders(resp.HttpResponse.Header) + + quotaRaw := resp.HttpResponse.Header.Get("x-ms-share-quota") + if quotaRaw != "" { + quota, e := strconv.Atoi(quotaRaw) + if e != nil { + return resp, fmt.Errorf("error converting %q to an integer: %s", quotaRaw, err) + } + resp.QuotaInGB = quota + } + + protocol := SMB + if protocolRaw := resp.HttpResponse.Header.Get("x-ms-enabled-protocols"); protocolRaw != "" { + protocol = ShareProtocol(protocolRaw) + } + + if accessTierRaw := resp.HttpResponse.Header.Get("x-ms-access-tier"); accessTierRaw != "" { + tier := AccessTier(accessTierRaw) + resp.AccessTier = &tier + } + resp.EnabledProtocol = protocol + } + } + + return +} diff --git a/storage/2023-11-03/file/shares/properties_set.go b/storage/2023-11-03/file/shares/properties_set.go new file mode 100644 index 0000000..ef3854a --- /dev/null +++ b/storage/2023-11-03/file/shares/properties_set.go @@ -0,0 +1,90 @@ +package shares + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type ShareProperties struct { + QuotaInGb *int + AccessTier *AccessTier +} + +type SetPropertiesResponse struct { + HttpResponse *client.Response +} + +// SetProperties lets you update the Quota for the specified Storage Share +func (c Client) SetProperties(ctx context.Context, shareName string, properties ShareProperties) (resp SetPropertiesResponse, err error) { + + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return 
resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if newQuotaGB := properties.QuotaInGb; newQuotaGB != nil && (*newQuotaGB <= 0 || *newQuotaGB > 102400) { + return resp, fmt.Errorf("`newQuotaGB` must be greater than 0, and less than/equal to 100TB (102400 GB)") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPut, + OptionsObject: SetPropertiesOptions{ + input: properties, + }, + Path: fmt.Sprintf("/%s", shareName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type SetPropertiesOptions struct { + input ShareProperties +} + +func (s SetPropertiesOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + if s.input.QuotaInGb != nil { + headers.Append("x-ms-share-quota", strconv.Itoa(*s.input.QuotaInGb)) + } + + if s.input.AccessTier != nil { + headers.Append("x-ms-access-tier", string(*s.input.AccessTier)) + } + return headers +} + +func (s SetPropertiesOptions) ToOData() *odata.Query { + return nil +} + +func (s SetPropertiesOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("restype", "share") + out.Append("comp", "properties") + return out +} diff --git a/storage/2023-11-03/file/shares/resource_id.go b/storage/2023-11-03/file/shares/resource_id.go new file mode 100644 index 0000000..13f8086 --- /dev/null +++ b/storage/2023-11-03/file/shares/resource_id.go @@ -0,0 +1,12 @@ +package shares + +import ( + "fmt" +) + +// GetResourceManagerResourceID returns the Resource Manager specific +// ResourceID for a specific Storage Share +func (c Client) GetResourceManagerResourceID(subscriptionID, resourceGroup, accountName, shareName string) string { + fmtStr := 
"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Storage/storageAccounts/%s/fileServices/default/shares/%s" + return fmt.Sprintf(fmtStr, subscriptionID, resourceGroup, accountName, shareName) +} diff --git a/storage/2023-11-03/file/shares/resource_id_test.go b/storage/2023-11-03/file/shares/resource_id_test.go new file mode 100644 index 0000000..f6ff899 --- /dev/null +++ b/storage/2023-11-03/file/shares/resource_id_test.go @@ -0,0 +1,11 @@ +package shares + +import "testing" + +func TestGetResourceManagerResourceID(t *testing.T) { + actual := Client{}.GetResourceManagerResourceID("11112222-3333-4444-5555-666677778888", "group1", "account1", "share1") + expected := "/subscriptions/11112222-3333-4444-5555-666677778888/resourceGroups/group1/providers/Microsoft.Storage/storageAccounts/account1/fileServices/default/shares/share1" + if actual != expected { + t.Fatalf("Expected the Resource Manager Resource ID to be %q but got %q", expected, actual) + } +} diff --git a/storage/2023-11-03/file/shares/snapshot_create.go b/storage/2023-11-03/file/shares/snapshot_create.go new file mode 100644 index 0000000..8ba7a07 --- /dev/null +++ b/storage/2023-11-03/file/shares/snapshot_create.go @@ -0,0 +1,94 @@ +package shares + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type CreateSnapshotInput struct { + MetaData map[string]string +} + +type CreateSnapshotResponse struct { + HttpResponse *client.Response + + // This header is a DateTime value that uniquely identifies the share snapshot. + // The value of this header may be used in subsequent requests to access the share snapshot. + // This value is opaque. + SnapshotDateTime string +} + +// CreateSnapshot creates a read-only snapshot of the share +// A share can support creation of 200 share snapshots. 
Attempting to create more than 200 share snapshots fails with 409 (Conflict). +// Attempting to create a share snapshot while a previous Snapshot Share operation is in progress fails with 409 (Conflict). +func (c Client) CreateSnapshot(ctx context.Context, shareName string, input CreateSnapshotInput) (resp CreateSnapshotResponse, err error) { + + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if err = metadata.Validate(input.MetaData); err != nil { + return resp, fmt.Errorf("`input.MetaData` is not valid: %v", err) + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: snapShotCreateOptions{ + metaData: input.MetaData, + }, + Path: fmt.Sprintf("/%s", shareName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Header != nil { + resp.SnapshotDateTime = resp.HttpResponse.Header.Get("x-ms-snapshot") + } + } + return +} + +type snapShotCreateOptions struct { + metaData map[string]string +} + +func (s snapShotCreateOptions) ToHeaders() *client.Headers { + headers := metadata.SetMetaDataHeaders(s.metaData) + return &headers +} + +func (s snapShotCreateOptions) ToOData() *odata.Query { + return nil +} + +func (s snapShotCreateOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("restype", "share") + out.Append("comp", "snapshot") + return out +} diff --git a/storage/2023-11-03/file/shares/snapshot_delete.go b/storage/2023-11-03/file/shares/snapshot_delete.go new file mode 
100644 index 0000000..bf3ccd6 --- /dev/null +++ b/storage/2023-11-03/file/shares/snapshot_delete.go @@ -0,0 +1,76 @@ +package shares + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type DeleteSnapshotResponse struct { + HttpResponse *client.Response +} + +// DeleteSnapshot deletes the specified Snapshot of a Storage Share +func (c Client) DeleteSnapshot(ctx context.Context, accountName, shareName string, shareSnapshot string) (resp DeleteSnapshotResponse, err error) { + + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if shareSnapshot == "" { + return resp, fmt.Errorf("`shareSnapshot` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodDelete, + OptionsObject: snapShotDeleteOptions{ + shareSnapShot: shareSnapshot, + }, + Path: fmt.Sprintf("/%s", shareName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type snapShotDeleteOptions struct { + shareSnapShot string +} + +func (s snapShotDeleteOptions) ToHeaders() *client.Headers { + return nil +} + +func (s snapShotDeleteOptions) ToOData() *odata.Query { + return nil +} + +func (s snapShotDeleteOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("restype", "share") + out.Append("sharesnapshot", s.shareSnapShot) + return out +} diff --git a/storage/2023-11-03/file/shares/snapshot_get.go b/storage/2023-11-03/file/shares/snapshot_get.go new 
file mode 100644 index 0000000..f4a12e9 --- /dev/null +++ b/storage/2023-11-03/file/shares/snapshot_get.go @@ -0,0 +1,88 @@ +package shares + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type GetSnapshotPropertiesResponse struct { + HttpResponse *client.Response + + MetaData map[string]string +} + +type GetSnapshotPropertiesInput struct { + snapshotShare string +} + +// GetSnapshot gets information about the specified Snapshot of the specified Storage Share +func (c Client) GetSnapshot(ctx context.Context, shareName string, input GetSnapshotPropertiesInput) (resp GetSnapshotPropertiesResponse, err error) { + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + if input.snapshotShare == "" { + return resp, fmt.Errorf("`snapshotShare` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: snapShotGetOptions{ + snapshotShare: input.snapshotShare, + }, + Path: fmt.Sprintf("/%s", shareName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Header != nil { + resp.MetaData = metadata.ParseFromHeaders(resp.HttpResponse.Header) + } + } + + return +} + +type snapShotGetOptions struct { + snapshotShare string +} + +func (s snapShotGetOptions) ToHeaders() *client.Headers { + return nil +} + +func (s snapShotGetOptions) 
ToOData() *odata.Query { + return nil +} + +func (s snapShotGetOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("restype", "share") + out.Append("snapshot", s.snapshotShare) + return out +} diff --git a/storage/2023-11-03/file/shares/stats.go b/storage/2023-11-03/file/shares/stats.go new file mode 100644 index 0000000..23e48af --- /dev/null +++ b/storage/2023-11-03/file/shares/stats.go @@ -0,0 +1,77 @@ +package shares + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type GetStatsResponse struct { + HttpResponse *client.Response + + // The approximate size of the data stored on the share. + // Note that this value may not include all recently created or recently resized files. + ShareUsageBytes int64 `xml:"ShareUsageBytes"` +} + +// GetStats returns information about the specified Storage Share +func (c Client) GetStats(ctx context.Context, shareName string) (resp GetStatsResponse, err error) { + if shareName == "" { + return resp, fmt.Errorf("`shareName` cannot be an empty string") + } + + if strings.ToLower(shareName) != shareName { + return resp, fmt.Errorf("`shareName` must be a lower-cased string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: statsOptions{}, + Path: shareName, + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + err = resp.HttpResponse.Unmarshal(&resp) + if err != nil { + return resp, fmt.Errorf("unmarshalling response: %v", err) + } + } + return +} + +type statsOptions struct{} + +func (s statsOptions) ToHeaders() 
*client.Headers { + return nil +} + +func (s statsOptions) ToOData() *odata.Query { + return nil +} + +func (s statsOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("restype", "share") + out.Append("comp", "stats") + return out +} diff --git a/storage/2023-11-03/file/shares/version.go b/storage/2023-11-03/file/shares/version.go new file mode 100644 index 0000000..6f62a9b --- /dev/null +++ b/storage/2023-11-03/file/shares/version.go @@ -0,0 +1,4 @@ +package shares + +const apiVersion = "2023-11-03" +const componentName = "file/shares" diff --git a/storage/2023-11-03/queue/messages/README.md b/storage/2023-11-03/queue/messages/README.md new file mode 100644 index 0000000..c6df9df --- /dev/null +++ b/storage/2023-11-03/queue/messages/README.md @@ -0,0 +1,43 @@ +## Queue Storage Messages SDK for API version 2020-08-04 + +This package allows you to interact with the Messages Queue Storage API + +### Supported Authorizers + +* Azure Active Directory (for the Resource Endpoint `https://storage.azure.com`) +* SharedKeyLite (Blob, File & Queue) + +### Example Usage + +```go +package main + +import ( + "context" + "fmt" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/tombuildsstuff/giovanni/storage/2020-08-04/queue/messages" +) + +func Example() error { + accountName := "storageaccount1" + storageAccountKey := "ABC123...." 
+ queueName := "myqueue" + + storageAuth := autorest.NewSharedKeyLiteAuthorizer(accountName, storageAccountKey) + messagesClient := messages.New() + messagesClient.Client.Authorizer = storageAuth + + ctx := context.TODO() + input := messages.PutInput{ + Message: "hello", + } + if _, err := messagesClient.Put(ctx, accountName, queueName, input); err != nil { + return fmt.Errorf("Error creating Message: %s", err) + } + + return nil +} +``` \ No newline at end of file diff --git a/storage/2023-11-03/queue/messages/api.go b/storage/2023-11-03/queue/messages/api.go new file mode 100644 index 0000000..2ef1072 --- /dev/null +++ b/storage/2023-11-03/queue/messages/api.go @@ -0,0 +1,13 @@ +package messages + +import ( + "context" +) + +type StorageQueueMessage interface { + Delete(ctx context.Context, queueName string, messageID string, input DeleteInput) (DeleteResponse, error) + Peek(ctx context.Context, queueName string, input PeekInput) (QueueMessagesListResponse, error) + Put(ctx context.Context, queueName string, input PutInput) (QueueMessagesListResponse, error) + Get(ctx context.Context, queueName string, input GetInput) (QueueMessagesListResponse, error) + Update(ctx context.Context, queueName string, messageID string, input UpdateInput) (UpdateResponse, error) +} diff --git a/storage/2023-11-03/queue/messages/client.go b/storage/2023-11-03/queue/messages/client.go new file mode 100644 index 0000000..ec5d64c --- /dev/null +++ b/storage/2023-11-03/queue/messages/client.go @@ -0,0 +1,22 @@ +package messages + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/dataplane/storage" +) + +// Client is the base client for Messages. 
+type Client struct { + Client *storage.BaseClient +} + +func NewWithBaseUri(baseUri string) (*Client, error) { + baseClient, err := storage.NewBaseClient(baseUri, componentName, apiVersion) + if err != nil { + return nil, fmt.Errorf("building base client: %+v", err) + } + return &Client{ + Client: baseClient, + }, nil +} diff --git a/storage/2023-11-03/queue/messages/delete.go b/storage/2023-11-03/queue/messages/delete.go new file mode 100644 index 0000000..870b3b7 --- /dev/null +++ b/storage/2023-11-03/queue/messages/delete.go @@ -0,0 +1,83 @@ +package messages + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type DeleteResponse struct { + HttpResponse *client.Response +} + +type DeleteInput struct { + PopReceipt string +} + +// Delete deletes a specific message +func (c Client) Delete(ctx context.Context, queueName, messageID string, input DeleteInput) (resp DeleteResponse, err error) { + + if queueName == "" { + return resp, fmt.Errorf("`queueName` cannot be an empty string") + } + + if strings.ToLower(queueName) != queueName { + return resp, fmt.Errorf("`queueName` must be a lower-cased string") + } + + if messageID == "" { + return resp, fmt.Errorf("`messageID` cannot be an empty string") + } + + if input.PopReceipt == "" { + return resp, fmt.Errorf("`input.PopReceipt` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + OptionsObject: deleteOptions{ + popReceipt: input.PopReceipt, + }, + Path: fmt.Sprintf("/%s/messages/%s", queueName, messageID), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + 
return + } + + return +} + +type deleteOptions struct { + popReceipt string +} + +func (d deleteOptions) ToHeaders() *client.Headers { + return nil +} + +func (d deleteOptions) ToOData() *odata.Query { + return nil +} + +func (d deleteOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("popreceipt", d.popReceipt) + return out +} diff --git a/storage/2023-11-03/queue/messages/get.go b/storage/2023-11-03/queue/messages/get.go new file mode 100644 index 0000000..ff229bf --- /dev/null +++ b/storage/2023-11-03/queue/messages/get.go @@ -0,0 +1,102 @@ +package messages + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type GetInput struct { + // VisibilityTimeout specifies the new visibility timeout value, in seconds, relative to server time. + // The new value must be larger than or equal to 0, and cannot be larger than 7 days. + VisibilityTimeout *int + + // NumberOfMessages specifies the (maximum) number of messages that should be retrieved from the queue. + // This can be a maximum of 32. 
+ NumberOfMessages int +} + +type GetResponse struct { + HttpResponse *client.Response +} + +// Get retrieves one or more messages from the front of the queue +func (c Client) Get(ctx context.Context, queueName string, input GetInput) (resp QueueMessagesListResponse, err error) { + if queueName == "" { + return resp, fmt.Errorf("`queueName` cannot be an empty string") + } + if strings.ToLower(queueName) != queueName { + return resp, fmt.Errorf("`queueName` must be a lower-cased string") + } + if input.NumberOfMessages < 1 || input.NumberOfMessages > 32 { + return resp, fmt.Errorf("`input.NumberOfMessages` must be between 1 and 32") + } + if input.VisibilityTimeout != nil { + t := *input.VisibilityTimeout + maxTime := (time.Hour * 24 * 7).Seconds() + if t < 1 || t < int(maxTime) { + return resp, fmt.Errorf("`input.VisibilityTimeout` must be larger than or equal to 1 second, and cannot be larger than 7 days") + } + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: getOptions{ + visibilityTimeout: input.VisibilityTimeout, + numberOfMessages: input.NumberOfMessages, + }, + Path: fmt.Sprintf("/%s/messages", queueName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if err = resp.HttpResponse.Unmarshal(&resp); err != nil { + return resp, fmt.Errorf("unmarshalling response: %+v", err) + } + } + + return +} + +type getOptions struct { + visibilityTimeout *int + numberOfMessages int +} + +func (g getOptions) ToHeaders() *client.Headers { + return nil +} + +func (g getOptions) ToOData() *odata.Query { + return nil +} + +func (g getOptions) ToQuery() *client.QueryParams { + out := 
&client.QueryParams{} + if g.visibilityTimeout != nil { + out.Append("visibilitytimeout", strconv.Itoa(*g.visibilityTimeout)) + } + out.Append("numofmessages", strconv.Itoa(g.numberOfMessages)) + return out +} diff --git a/storage/2023-11-03/queue/messages/lifecycle_test.go b/storage/2023-11-03/queue/messages/lifecycle_test.go new file mode 100644 index 0000000..fd5e22d --- /dev/null +++ b/storage/2023-11-03/queue/messages/lifecycle_test.go @@ -0,0 +1,116 @@ +package messages + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/2020-08-04/queue/queues" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +var _ StorageQueueMessage = Client{} + +func TestLifeCycle(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + queueName := fmt.Sprintf("queue-%d", testhelpers.RandomInt()) + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + queuesClient, err := queues.NewWithBaseUri(fmt.Sprintf("https://%s.%s.%s", accountName, "queue", *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(queuesClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } 
+ + messagesClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.%s.%s", accountName, "queue", *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(messagesClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + _, err = queuesClient.Create(ctx, queueName, queues.CreateInput{MetaData: map[string]string{}}) + if err != nil { + t.Fatalf("Error creating queue: %s", err) + } + defer queuesClient.Delete(ctx, queueName) + + input := PutInput{ + Message: "ohhai", + } + putResp, err := messagesClient.Put(ctx, queueName, input) + if err != nil { + t.Fatalf("Error putting message in queue: %s", err) + } + + messageId := (*putResp.QueueMessages)[0].MessageId + popReceipt := (*putResp.QueueMessages)[0].PopReceipt + + _, err = messagesClient.Update(ctx, queueName, messageId, UpdateInput{ + PopReceipt: popReceipt, + Message: "Updated message", + VisibilityTimeout: 65, + }) + if err != nil { + t.Fatalf("Error updating: %s", err) + } + + for i := 0; i < 5; i++ { + input := PutInput{ + Message: fmt.Sprintf("Message %d", i), + } + _, err := messagesClient.Put(ctx, queueName, input) + if err != nil { + t.Fatalf("Error putting message %d in queue: %s", i, err) + } + } + + peakedMessages, err := messagesClient.Peek(ctx, queueName, PeekInput{NumberOfMessages: 3}) + if err != nil { + t.Fatalf("Error peaking messages: %s", err) + } + + for _, v := range *peakedMessages.QueueMessages { + t.Logf("Message: %q", v.MessageId) + } + + retrievedMessages, err := messagesClient.Get(ctx, queueName, GetInput{NumberOfMessages: 6}) + if err != nil { + t.Fatalf("Error retrieving messages: %s", err) + } + + for _, v := range *retrievedMessages.QueueMessages { + t.Logf("Message: %q", v.MessageId) + + _, err = messagesClient.Delete(ctx, queueName, v.MessageId, DeleteInput{PopReceipt: v.PopReceipt}) + if err != nil { + t.Fatalf("Error deleting message from 
queue: %s", err) + } + } +} diff --git a/storage/2023-11-03/queue/messages/models.go b/storage/2023-11-03/queue/messages/models.go new file mode 100644 index 0000000..eb450f0 --- /dev/null +++ b/storage/2023-11-03/queue/messages/models.go @@ -0,0 +1,23 @@ +package messages + +import ( + "github.com/hashicorp/go-azure-sdk/sdk/client" +) + +type QueueMessage struct { + MessageText string `xml:"MessageText"` +} + +type QueueMessagesListResponse struct { + HttpResponse *client.Response + + QueueMessages *[]QueueMessageResponse `xml:"QueueMessage"` +} + +type QueueMessageResponse struct { + MessageId string `xml:"MessageId"` + InsertionTime string `xml:"InsertionTime"` + ExpirationTime string `xml:"ExpirationTime"` + PopReceipt string `xml:"PopReceipt"` + TimeNextVisible string `xml:"TimeNextVisible"` +} diff --git a/storage/2023-11-03/queue/messages/peek.go b/storage/2023-11-03/queue/messages/peek.go new file mode 100644 index 0000000..c35157d --- /dev/null +++ b/storage/2023-11-03/queue/messages/peek.go @@ -0,0 +1,85 @@ +package messages + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type PeekInput struct { + // NumberOfMessages specifies the (maximum) number of messages that should be peak'd from the front of the queue. + // This can be a maximum of 32. 
+ NumberOfMessages int +} + +// Peek retrieves one or more messages from the front of the queue, but doesn't alter the visibility of the messages +func (c Client) Peek(ctx context.Context, queueName string, input PeekInput) (resp QueueMessagesListResponse, err error) { + + if queueName == "" { + return resp, fmt.Errorf("`queueName` cannot be an empty string") + } + + if strings.ToLower(queueName) != queueName { + return resp, fmt.Errorf("`queueName` must be a lower-cased string") + } + + if input.NumberOfMessages < 1 || input.NumberOfMessages > 32 { + return resp, fmt.Errorf("`input.NumberOfMessages` must be between 1 and 32") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: peekOptions{ + numberOfMessages: input.NumberOfMessages, + }, + Path: fmt.Sprintf("/%s/messages", queueName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if err = resp.HttpResponse.Unmarshal(&resp); err != nil { + return resp, fmt.Errorf("unmarshalling response: %+v", err) + } + } + + return +} + +type peekOptions struct { + numberOfMessages int +} + +func (p peekOptions) ToHeaders() *client.Headers { + return nil +} + +func (p peekOptions) ToOData() *odata.Query { + return nil +} + +func (p peekOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("numofmessages", strconv.Itoa(p.numberOfMessages)) + out.Append("peekonly", "true") + return out +} diff --git a/storage/2023-11-03/queue/messages/put.go b/storage/2023-11-03/queue/messages/put.go new file mode 100644 index 0000000..19cde71 --- /dev/null +++ b/storage/2023-11-03/queue/messages/put.go @@ -0,0 +1,114 @@ +package messages + 
+import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "io" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type PutInput struct { + // A message must be in a format that can be included in an XML request with UTF-8 encoding. + // The encoded message can be up to 64 KB in size. + Message string + + // The maximum time-to-live can be any positive number, + // as well as -1 indicating that the message does not expire. + // If this parameter is omitted, the default time-to-live is 7 days. + MessageTtl *int + + // Specifies the new visibility timeout value, in seconds, relative to server time. + // The new value must be larger than or equal to 0, and cannot be larger than 7 days. + // The visibility timeout of a message cannot be set to a value later than the expiry time. + // visibilitytimeout should be set to a value smaller than the time-to-live value. + // If not specified, the default value is 0. 
+ VisibilityTimeout *int +} + +// Put adds a new message to the back of the message queue +func (c Client) Put(ctx context.Context, queueName string, input PutInput) (resp QueueMessagesListResponse, err error) { + if queueName == "" { + return resp, fmt.Errorf("`queueName` cannot be an empty string") + } + + if strings.ToLower(queueName) != queueName { + return resp, fmt.Errorf("`queueName` must be a lower-cased string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPost, + OptionsObject: putOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/messages", queueName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + marshalledMsg, err := xml.Marshal(QueueMessage{ + MessageText: input.Message, + }) + if err != nil { + return resp, fmt.Errorf("marshalling request: %v", err) + } + + body := xml.Header + string(marshalledMsg) + req.Body = io.NopCloser(bytes.NewReader([]byte(body))) + req.ContentLength = int64(len(body)) + req.Header.Set("Content-Length", strconv.Itoa(len(body))) + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if err = resp.HttpResponse.Unmarshal(&resp); err != nil { + return resp, fmt.Errorf("unmarshalling response: %+v", err) + } + } + + return +} + +type putOptions struct { + input PutInput +} + +func (p putOptions) ToHeaders() *client.Headers { + return nil +} + +func (p putOptions) ToOData() *odata.Query { + return nil +} + +func (p putOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + + if p.input.MessageTtl != nil { + out.Append("messagettl", strconv.Itoa(*p.input.MessageTtl)) + } + + if p.input.VisibilityTimeout != nil { + out.Append("visibilitytimeout", strconv.Itoa(*p.input.VisibilityTimeout)) + } + + 
return out +} diff --git a/storage/2023-11-03/queue/messages/update.go b/storage/2023-11-03/queue/messages/update.go new file mode 100644 index 0000000..fa38011 --- /dev/null +++ b/storage/2023-11-03/queue/messages/update.go @@ -0,0 +1,105 @@ +package messages + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "io" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type UpdateInput struct { + // A message must be in a format that can be included in an XML request with UTF-8 encoding. + // The encoded message can be up to 64 KB in size. + Message string + + // Specifies the valid pop receipt value required to modify this message. + PopReceipt string + + // Specifies the new visibility timeout value, in seconds, relative to server time. + // The new value must be larger than or equal to 0, and cannot be larger than 7 days. + // The visibility timeout of a message cannot be set to a value later than the expiry time. + // A message can be updated until it has been deleted or has expired. 
+ VisibilityTimeout int +} + +type UpdateResponse struct { + HttpResponse *client.Response +} + +// Update updates an existing message based on it's Pop Receipt +func (c Client) Update(ctx context.Context, queueName string, messageID string, input UpdateInput) (resp UpdateResponse, err error) { + + if queueName == "" { + return resp, fmt.Errorf("`queueName` cannot be an empty string") + } + if strings.ToLower(queueName) != queueName { + return resp, fmt.Errorf("`queueName` must be a lower-cased string") + } + if input.PopReceipt == "" { + return resp, fmt.Errorf("`input.PopReceipt` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + }, + HttpMethod: http.MethodPut, + OptionsObject: updateOptions{ + input: input, + }, + Path: fmt.Sprintf("/%s/messages/%s", queueName, messageID), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + marshalledMsg, err := xml.Marshal(QueueMessage{ + MessageText: input.Message, + }) + if err != nil { + return resp, fmt.Errorf("marshalling request: %v", err) + } + + body := xml.Header + string(marshalledMsg) + req.Body = io.NopCloser(bytes.NewReader([]byte(body))) + req.ContentLength = int64(len(body)) + req.Header.Set("Content-Length", strconv.Itoa(len(body))) + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type updateOptions struct { + input UpdateInput +} + +func (u updateOptions) ToHeaders() *client.Headers { + return nil +} + +func (u updateOptions) ToOData() *odata.Query { + return nil +} + +func (u updateOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("visibilitytimeout", strconv.Itoa(u.input.VisibilityTimeout)) + out.Append("popreceipt", u.input.PopReceipt) + return out +} diff --git 
a/storage/2023-11-03/queue/messages/version.go b/storage/2023-11-03/queue/messages/version.go new file mode 100644 index 0000000..4e31341 --- /dev/null +++ b/storage/2023-11-03/queue/messages/version.go @@ -0,0 +1,5 @@ +package messages + +// APIVersion is the version of the API used for all Storage API Operations +const apiVersion = "2023-11-03" +const componentName = "queue/messages" diff --git a/storage/2023-11-03/queue/queues/README.md b/storage/2023-11-03/queue/queues/README.md new file mode 100644 index 0000000..b8b84a3 --- /dev/null +++ b/storage/2023-11-03/queue/queues/README.md @@ -0,0 +1,43 @@ +## Queue Storage Queues SDK for API version 2020-08-04 + +This package allows you to interact with the Queues Queue Storage API + +### Supported Authorizers + +* Azure Active Directory (for the Resource Endpoint `https://storage.azure.com`) +* SharedKeyLite (Blob, File & Queue) + +### Example Usage + +```go +package main + +import ( + "context" + "fmt" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/tombuildsstuff/giovanni/storage/2020-08-04/queue/queues" +) + +func Example() error { + accountName := "storageaccount1" + storageAccountKey := "ABC123...." 
+ queueName := "myqueue" + + storageAuth := autorest.NewSharedKeyLiteAuthorizer(accountName, storageAccountKey) + queuesClient := queues.New() + queuesClient.Client.Authorizer = storageAuth + + ctx := context.TODO() + metadata := map[string]string{ + "hello": "world", + } + if _, err := queuesClient.Create(ctx, accountName, queueName, metadata); err != nil { + return fmt.Errorf("Error creating Queue: %s", err) + } + + return nil +} +``` \ No newline at end of file diff --git a/storage/2023-11-03/queue/queues/api.go b/storage/2023-11-03/queue/queues/api.go new file mode 100644 index 0000000..e051453 --- /dev/null +++ b/storage/2023-11-03/queue/queues/api.go @@ -0,0 +1,15 @@ +package queues + +import ( + "context" +) + +type StorageQueue interface { + Delete(ctx context.Context, queueName string) (DeleteResponse, error) + GetMetaData(ctx context.Context, queueName string) (GetMetaDataResponse, error) + SetMetaData(ctx context.Context, queueName string, input SetMetaDataInput) (SetMetaDataResponse, error) + Create(ctx context.Context, queueName string, input CreateInput) (CreateResponse, error) + GetResourceManagerResourceID(subscriptionID, resourceGroup, accountName, queueName string) string + SetServiceProperties(ctx context.Context, input SetStorageServicePropertiesInput) (SetStorageServicePropertiesResponse, error) + GetServiceProperties(ctx context.Context) (GetStorageServicePropertiesResponse, error) +} diff --git a/storage/2023-11-03/queue/queues/client.go b/storage/2023-11-03/queue/queues/client.go new file mode 100644 index 0000000..40aa674 --- /dev/null +++ b/storage/2023-11-03/queue/queues/client.go @@ -0,0 +1,24 @@ +package queues + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/dataplane/storage" +) + +// Client is the base client for Queue Storage Shares. + +// Client is the base client for Messages. 
+type Client struct { + Client *storage.BaseClient +} + +func NewWithBaseUri(baseUri string) (*Client, error) { + baseClient, err := storage.NewBaseClient(baseUri, componentName, apiVersion) + if err != nil { + return nil, fmt.Errorf("building base client: %+v", err) + } + return &Client{ + Client: baseClient, + }, nil +} diff --git a/storage/2023-11-03/queue/queues/create.go b/storage/2023-11-03/queue/queues/create.go new file mode 100644 index 0000000..d156052 --- /dev/null +++ b/storage/2023-11-03/queue/queues/create.go @@ -0,0 +1,83 @@ +package queues + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type CreateInput struct { + MetaData map[string]string +} + +type CreateResponse struct { + HttpResponse *client.Response +} + +// Create creates the specified Queue within the specified Storage Account +func (c Client) Create(ctx context.Context, queueName string, input CreateInput) (resp CreateResponse, err error) { + + if queueName == "" { + return resp, fmt.Errorf("`queueName` cannot be an empty string") + } + + if strings.ToLower(queueName) != queueName { + return resp, fmt.Errorf("`queueName` must be a lower-cased string") + } + + if err := metadata.Validate(input.MetaData); err != nil { + return resp, fmt.Errorf("`metadata` is not valid: %s", err) + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + OptionsObject: createOptions{ + metadata: input.MetaData, + }, + Path: fmt.Sprintf("/%s", queueName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return 
+} + +type createOptions struct { + metadata map[string]string +} + +func (c createOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + + if len(c.metadata) > 0 { + headers.Merge(metadata.SetMetaDataHeaders(c.metadata)) + } + return headers +} + +func (c createOptions) ToOData() *odata.Query { + return nil +} + +func (c createOptions) ToQuery() *client.QueryParams { + return nil +} diff --git a/storage/2023-11-03/queue/queues/delete.go b/storage/2023-11-03/queue/queues/delete.go new file mode 100644 index 0000000..39d5d64 --- /dev/null +++ b/storage/2023-11-03/queue/queues/delete.go @@ -0,0 +1,50 @@ +package queues + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" +) + +type DeleteResponse struct { + HttpResponse *client.Response +} + +// Delete deletes the specified Queue within the specified Storage Account +func (c Client) Delete(ctx context.Context, queueName string) (resp DeleteResponse, err error) { + + if queueName == "" { + return resp, fmt.Errorf("`queueName` cannot be an empty string") + } + + if strings.ToLower(queueName) != queueName { + return resp, fmt.Errorf("`queueName` must be a lower-cased string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + OptionsObject: nil, + Path: fmt.Sprintf("/%s", queueName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} diff --git a/storage/2023-11-03/queue/queues/lifecycle_test.go b/storage/2023-11-03/queue/queues/lifecycle_test.go new file mode 100644 index 0000000..911ec01 --- /dev/null +++ b/storage/2023-11-03/queue/queues/lifecycle_test.go @@ -0,0 +1,261 @@ +package queues + +import ( 
+ "context" + "fmt" + "log" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +var _ StorageQueue = Client{} + +func TestQueuesLifecycle(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + queueName := fmt.Sprintf("queue-%d", testhelpers.RandomInt()) + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + queuesClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.%s.%s", accountName, "queue", *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(queuesClient.Client, testData, auth.SharedKey); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + // first let's test an empty container + _, err = queuesClient.Create(ctx, queueName, CreateInput{MetaData: map[string]string{}}) + if err != nil { + t.Fatal(fmt.Errorf("error creating: %s", err)) + } + + // then let's retrieve it to ensure there's no metadata.. + resp, err := queuesClient.GetMetaData(ctx, queueName) + if err != nil { + t.Fatalf("Error retrieving MetaData: %s", err) + } + if len(resp.MetaData) != 0 { + t.Fatalf("Expected no MetaData but got: %s", err) + } + + // then let's add some.. 
+ updatedMetaData := map[string]string{ + "band": "panic", + "boots": "the-overpass", + } + _, err = queuesClient.SetMetaData(ctx, queueName, SetMetaDataInput{MetaData: updatedMetaData}) + if err != nil { + t.Fatalf("Error setting MetaData: %s", err) + } + + resp, err = queuesClient.GetMetaData(ctx, queueName) + if err != nil { + t.Fatalf("Error re-retrieving MetaData: %s", err) + } + + if len(resp.MetaData) != 2 { + t.Fatalf("Expected metadata to have 2 items but got: %s", resp.MetaData) + } + if resp.MetaData["band"] != "panic" { + t.Fatalf("Expected `band` to be `panic` but got: %s", resp.MetaData["band"]) + } + if resp.MetaData["boots"] != "the-overpass" { + t.Fatalf("Expected `boots` to be `the-overpass` but got: %s", resp.MetaData["boots"]) + } + + // and woo let's remove it again + _, err = queuesClient.SetMetaData(ctx, queueName, SetMetaDataInput{MetaData: map[string]string{}}) + if err != nil { + t.Fatalf("Error setting MetaData: %s", err) + } + + resp, err = queuesClient.GetMetaData(ctx, queueName) + if err != nil { + t.Fatalf("Error retrieving MetaData: %s", err) + } + if len(resp.MetaData) != 0 { + t.Fatalf("Expected no MetaData but got: %s", err) + } + + // set some properties + props := StorageServiceProperties{ + Logging: &LoggingConfig{ + Version: "1.0", + Delete: true, + Read: true, + Write: true, + RetentionPolicy: RetentionPolicy{ + Enabled: true, + Days: 7, + }, + }, + Cors: &Cors{ + CorsRule: []CorsRule{ + CorsRule{ + AllowedMethods: "GET,PUT", + AllowedOrigins: "http://www.example.com", + ExposedHeaders: "x-tempo-*", + AllowedHeaders: "x-tempo-*", + MaxAgeInSeconds: 500, + }, + CorsRule{ + AllowedMethods: "POST", + AllowedOrigins: "http://www.test.com", + ExposedHeaders: "*", + AllowedHeaders: "x-method-*", + MaxAgeInSeconds: 200, + }, + }, + }, + HourMetrics: &MetricsConfig{ + Version: "1.0", + Enabled: false, + RetentionPolicy: RetentionPolicy{ + Enabled: true, + Days: 7, + }, + }, + MinuteMetrics: &MetricsConfig{ + Version: "1.0", + 
Enabled: false, + RetentionPolicy: RetentionPolicy{ + Enabled: true, + Days: 7, + }, + }, + } + _, err = queuesClient.SetServiceProperties(ctx, SetStorageServicePropertiesInput{Properties: props}) + if err != nil { + t.Fatalf("SetServiceProperties failed: %s", err) + } + + properties, err := queuesClient.GetServiceProperties(ctx) + if err != nil { + t.Fatalf("GetServiceProperties failed: %s", err) + } + + if len(properties.Cors.CorsRule) > 1 { + if properties.Cors.CorsRule[0].AllowedMethods != "GET,PUT" { + t.Fatalf("CORS Methods weren't set!") + } + if properties.Cors.CorsRule[1].AllowedMethods != "POST" { + t.Fatalf("CORS Methods weren't set!") + } + } else { + t.Fatalf("CORS Methods weren't set!") + } + + if properties.HourMetrics.Enabled { + t.Fatalf("HourMetrics were enabled when they shouldn't be!") + } + + if properties.MinuteMetrics.Enabled { + t.Fatalf("MinuteMetrics were enabled when they shouldn't be!") + } + + if !properties.Logging.Write { + t.Fatalf("Logging Write's was not enabled when they should be!") + } + + includeAPIS := true + // set some properties + props2 := StorageServiceProperties{ + Logging: &LoggingConfig{ + Version: "1.0", + Delete: true, + Read: true, + Write: true, + RetentionPolicy: RetentionPolicy{ + Enabled: true, + Days: 7, + }, + }, + Cors: &Cors{ + CorsRule: []CorsRule{ + CorsRule{ + AllowedMethods: "PUT", + AllowedOrigins: "http://www.example.com", + ExposedHeaders: "x-tempo-*", + AllowedHeaders: "x-tempo-*", + MaxAgeInSeconds: 500, + }, + }, + }, + HourMetrics: &MetricsConfig{ + Version: "1.0", + Enabled: true, + RetentionPolicy: RetentionPolicy{ + Enabled: true, + Days: 7, + }, + IncludeAPIs: &includeAPIS, + }, + MinuteMetrics: &MetricsConfig{ + Version: "1.0", + Enabled: false, + RetentionPolicy: RetentionPolicy{ + Enabled: true, + Days: 7, + }, + }, + } + + _, err = queuesClient.SetServiceProperties(ctx, SetStorageServicePropertiesInput{Properties: props2}) + if err != nil { + t.Fatalf("SetServiceProperties failed: %s", 
err) + } + + properties, err = queuesClient.GetServiceProperties(ctx) + if err != nil { + t.Fatalf("GetServiceProperties failed: %s", err) + } + + if len(properties.Cors.CorsRule) == 1 { + if properties.Cors.CorsRule[0].AllowedMethods != "PUT" { + t.Fatalf("CORS Methods weren't set!") + } + } else { + t.Fatalf("CORS Methods weren't set!") + } + + if !properties.HourMetrics.Enabled { + t.Fatalf("HourMetrics were enabled when they shouldn't be!") + } + + if properties.MinuteMetrics.Enabled { + t.Fatalf("MinuteMetrics were enabled when they shouldn't be!") + } + + if !properties.Logging.Write { + t.Fatalf("Logging Write's was not enabled when they should be!") + } + + log.Printf("[DEBUG] Deleting..") + _, err = queuesClient.Delete(ctx, queueName) + if err != nil { + t.Fatal(fmt.Errorf("error deleting: %s", err)) + } +} diff --git a/storage/2023-11-03/queue/queues/metadata_get.go b/storage/2023-11-03/queue/queues/metadata_get.go new file mode 100644 index 0000000..14c32d6 --- /dev/null +++ b/storage/2023-11-03/queue/queues/metadata_get.go @@ -0,0 +1,76 @@ +package queues + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type GetMetaDataResponse struct { + HttpResponse *client.Response + + MetaData map[string]string +} + +// GetMetaData returns the metadata for this Queue +func (c Client) GetMetaData(ctx context.Context, queueName string) (resp GetMetaDataResponse, err error) { + + if queueName == "" { + return resp, fmt.Errorf("`queueName` cannot be an empty string") + } + + if strings.ToLower(queueName) != queueName { + return resp, fmt.Errorf("`queueName` must be a lower-cased string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: getMetaDataOptions{}, + 
Path: fmt.Sprintf("/%s", queueName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Header != nil { + resp.MetaData = metadata.ParseFromHeaders(resp.HttpResponse.Header) + } + } + + return +} + +type getMetaDataOptions struct{} + +func (g getMetaDataOptions) ToHeaders() *client.Headers { + return nil +} + +func (g getMetaDataOptions) ToOData() *odata.Query { + return nil +} + +func (g getMetaDataOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "metadata") + return out +} diff --git a/storage/2023-11-03/queue/queues/metadata_set.go b/storage/2023-11-03/queue/queues/metadata_set.go new file mode 100644 index 0000000..ea8eac3 --- /dev/null +++ b/storage/2023-11-03/queue/queues/metadata_set.go @@ -0,0 +1,82 @@ +package queues + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type SetMetaDataResponse struct { + HttpResponse *client.Response +} + +type SetMetaDataInput struct { + MetaData map[string]string +} + +// SetMetaData returns the metadata for this Queue +func (c Client) SetMetaData(ctx context.Context, queueName string, input SetMetaDataInput) (resp SetMetaDataResponse, err error) { + + if queueName == "" { + return resp, fmt.Errorf("`queueName` cannot be an empty string") + } + + if strings.ToLower(queueName) != queueName { + return resp, fmt.Errorf("`queueName` must be a lower-cased string") + } + + if err := metadata.Validate(input.MetaData); err != nil { + return resp, fmt.Errorf("`metadata` is not valid: %v", err) + } + + opts := client.RequestOptions{ + ContentType: 
"application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + }, + HttpMethod: http.MethodPut, + OptionsObject: setMetaDataOptions{ + metadata: input.MetaData, + }, + Path: fmt.Sprintf("/%s", queueName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type setMetaDataOptions struct { + metadata map[string]string +} + +func (s setMetaDataOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Merge(metadata.SetMetaDataHeaders(s.metadata)) + return headers +} + +func (s setMetaDataOptions) ToOData() *odata.Query { + return nil +} + +func (s setMetaDataOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "metadata") + return out +} diff --git a/storage/2023-11-03/queue/queues/models.go b/storage/2023-11-03/queue/queues/models.go new file mode 100644 index 0000000..2195244 --- /dev/null +++ b/storage/2023-11-03/queue/queues/models.go @@ -0,0 +1,42 @@ +package queues + +type StorageServiceProperties struct { + Logging *LoggingConfig `xml:"Logging,omitempty"` + HourMetrics *MetricsConfig `xml:"HourMetrics,omitempty"` + MinuteMetrics *MetricsConfig `xml:"MinuteMetrics,omitempty"` + Cors *Cors `xml:"Cors,omitempty"` +} + +type LoggingConfig struct { + Version string `xml:"Version"` + Delete bool `xml:"Delete"` + Read bool `xml:"Read"` + Write bool `xml:"Write"` + RetentionPolicy RetentionPolicy `xml:"RetentionPolicy"` +} + +type MetricsConfig struct { + Version string `xml:"Version"` + Enabled bool `xml:"Enabled"` + RetentionPolicy RetentionPolicy `xml:"RetentionPolicy"` + + // Element IncludeAPIs is only expected when Metrics is enabled + IncludeAPIs *bool `xml:"IncludeAPIs,omitempty"` +} + +type RetentionPolicy struct { + Enabled bool `xml:"Enabled"` + Days 
int `xml:"Days,omitempty"`
+}
+
+type Cors struct {
+	CorsRule []CorsRule `xml:"CorsRule"`
+}
+
+type CorsRule struct {
+	AllowedOrigins  string `xml:"AllowedOrigins"`
+	AllowedMethods  string `xml:"AllowedMethods"`
+	AllowedHeaders  string `xml:"AllowedHeaders"`
+	ExposedHeaders  string `xml:"ExposedHeaders"`
+	MaxAgeInSeconds int    `xml:"MaxAgeInSeconds"`
+}
diff --git a/storage/2023-11-03/queue/queues/properties_get.go b/storage/2023-11-03/queue/queues/properties_get.go
new file mode 100644
index 0000000..b2ea774
--- /dev/null
+++ b/storage/2023-11-03/queue/queues/properties_get.go
@@ -0,0 +1,67 @@
+package queues
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/hashicorp/go-azure-sdk/sdk/client"
+	"github.com/hashicorp/go-azure-sdk/sdk/odata"
+)
+
+type GetStorageServicePropertiesResponse struct {
+	StorageServiceProperties
+	HttpResponse *client.Response
+}
+
+// GetServiceProperties gets the properties for this queue
+func (c Client) GetServiceProperties(ctx context.Context) (resp GetStorageServicePropertiesResponse, err error) {
+
+	opts := client.RequestOptions{
+		ContentType: "application/xml; charset=utf-8",
+		ExpectedStatusCodes: []int{
+			http.StatusOK,
+		},
+		HttpMethod:    http.MethodGet,
+		OptionsObject: getStorageServicePropertiesOptions{},
+		Path:          "/",
+	}
+
+	req, err := c.Client.NewRequest(ctx, opts)
+	if err != nil {
+		err = fmt.Errorf("building request: %+v", err)
+		return
+	}
+
+	resp.HttpResponse, err = req.Execute(ctx)
+	if err != nil {
+		err = fmt.Errorf("executing request: %+v", err)
+		return
+	}
+
+	if resp.HttpResponse != nil {
+		err = resp.HttpResponse.Unmarshal(&resp)
+		if err != nil {
+			return resp, fmt.Errorf("unmarshalling response: %v", err)
+		}
+	}
+
+	return
+}
+
+type getStorageServicePropertiesOptions struct{}
+
+func (g getStorageServicePropertiesOptions) ToHeaders() *client.Headers {
+	return nil
+}
+
+func (g getStorageServicePropertiesOptions) ToOData() *odata.Query {
+	return nil
+}
+
+func (g
getStorageServicePropertiesOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "properties") + out.Append("restype", "service") + return out +} diff --git a/storage/2023-11-03/queue/queues/properties_set.go b/storage/2023-11-03/queue/queues/properties_set.go new file mode 100644 index 0000000..744bd97 --- /dev/null +++ b/storage/2023-11-03/queue/queues/properties_set.go @@ -0,0 +1,76 @@ +package queues + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "io" + "net/http" + "strconv" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type SetStorageServicePropertiesResponse struct { + HttpResponse *client.Response +} + +type SetStorageServicePropertiesInput struct { + Properties StorageServiceProperties +} + +// SetServiceProperties sets the properties for this queue +func (c Client) SetServiceProperties(ctx context.Context, input SetStorageServicePropertiesInput) (resp SetStorageServicePropertiesResponse, err error) { + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodPut, + OptionsObject: setStorageServicePropertiesOptions{}, + Path: "/", + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + marshalledProps, err := xml.Marshal(&input.Properties) + if err != nil { + return resp, fmt.Errorf("marshalling request: %v", err) + } + body := xml.Header + string(marshalledProps) + req.Body = io.NopCloser(bytes.NewReader([]byte(body))) + req.ContentLength = int64(len(body)) + req.Header.Set("Content-Length", strconv.Itoa(len(body))) + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type setStorageServicePropertiesOptions struct{} + +func (s setStorageServicePropertiesOptions) 
ToHeaders() *client.Headers { + return nil +} + +func (s setStorageServicePropertiesOptions) ToOData() *odata.Query { + return nil +} + +func (s setStorageServicePropertiesOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("restype", "service") + out.Append("comp", "properties") + return out +} diff --git a/storage/2023-11-03/queue/queues/resource_id.go b/storage/2023-11-03/queue/queues/resource_id.go new file mode 100644 index 0000000..7fe5124 --- /dev/null +++ b/storage/2023-11-03/queue/queues/resource_id.go @@ -0,0 +1,12 @@ +package queues + +import ( + "fmt" +) + +// GetResourceManagerResourceID returns the Resource Manager ID for the given Queue +// This can be useful when, for example, you're using this as a unique identifier +func (c Client) GetResourceManagerResourceID(subscriptionID, resourceGroup, accountName, queueName string) string { + fmtStr := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Storage/storageAccounts/%s/queueServices/default/queues/%s" + return fmt.Sprintf(fmtStr, subscriptionID, resourceGroup, accountName, queueName) +} diff --git a/storage/2023-11-03/queue/queues/resource_id_test.go b/storage/2023-11-03/queue/queues/resource_id_test.go new file mode 100644 index 0000000..5bfdb67 --- /dev/null +++ b/storage/2023-11-03/queue/queues/resource_id_test.go @@ -0,0 +1,11 @@ +package queues + +import "testing" + +func TestGetResourceManagerResourceID(t *testing.T) { + actual := Client{}.GetResourceManagerResourceID("11112222-3333-4444-5555-666677778888", "group1", "account1", "queue1") + expected := "/subscriptions/11112222-3333-4444-5555-666677778888/resourceGroups/group1/providers/Microsoft.Storage/storageAccounts/account1/queueServices/default/queues/queue1" + if actual != expected { + t.Fatalf("Expected the Resource Manager Resource ID to be %q but got %q", expected, actual) + } +} diff --git a/storage/2023-11-03/queue/queues/version.go b/storage/2023-11-03/queue/queues/version.go new file mode 
100644 index 0000000..c22267e --- /dev/null +++ b/storage/2023-11-03/queue/queues/version.go @@ -0,0 +1,5 @@ +package queues + +// APIVersion is the version of the API used for all Storage API Operations +const apiVersion = "2023-11-03" +const componentName = "queue/queues" diff --git a/storage/2023-11-03/table/entities/README.md b/storage/2023-11-03/table/entities/README.md new file mode 100644 index 0000000..e59b5a2 --- /dev/null +++ b/storage/2023-11-03/table/entities/README.md @@ -0,0 +1,48 @@ +## Table Storage Entities SDK for API version 2020-08-04 + +This package allows you to interact with the Entities Table Storage API + +### Supported Authorizers + +* SharedKeyLite (Table) + +### Example Usage + +```go +package main + +import ( + "context" + "fmt" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/tombuildsstuff/giovanni/storage/2020-08-04/table/entities" +) + +func Example() error { + accountName := "storageaccount1" + storageAccountKey := "ABC123...." + tableName := "mytable" + + storageAuth := autorest.NewSharedKeyLiteTableAuthorizer(accountName, storageAccountKey) + entitiesClient := entities.New() + entitiesClient.Client.Authorizer = storageAuth + + ctx := context.TODO() + input := entities.InsertEntityInput{ + PartitionKey: "abc", + RowKey: "123", + MetaDataLevel: entities.NoMetaData, + Entity: map[string]interface{}{ + "title": "Don't Kill My Vibe", + "artist": "Sigrid", + }, + } + if _, err := entitiesClient.Insert(ctx, accountName, tableName, input); err != nil { + return fmt.Errorf("Error creating Entity: %s", err) + } + + return nil +} +``` \ No newline at end of file diff --git a/storage/2023-11-03/table/entities/api.go b/storage/2023-11-03/table/entities/api.go new file mode 100644 index 0000000..bb0e980 --- /dev/null +++ b/storage/2023-11-03/table/entities/api.go @@ -0,0 +1,14 @@ +package entities + +import ( + "context" +) + +type StorageTableEntity interface { + Delete(ctx context.Context, tableName string, input 
DeleteEntityInput) (resp DeleteEntityResponse, err error) + Insert(ctx context.Context, tableName string, input InsertEntityInput) (resp InsertResponse, err error) + InsertOrReplace(ctx context.Context, tableName string, input InsertOrReplaceEntityInput) (resp InsertOrReplaceResponse, err error) + InsertOrMerge(ctx context.Context, tableName string, input InsertOrMergeEntityInput) (resp InsertOrMergeResponse, err error) + Query(ctx context.Context, tableName string, input QueryEntitiesInput) (resp QueryEntitiesResponse, err error) + Get(ctx context.Context, tableName string, input GetEntityInput) (resp GetEntityResponse, err error) +} diff --git a/storage/2023-11-03/table/entities/client.go b/storage/2023-11-03/table/entities/client.go new file mode 100644 index 0000000..d1182ea --- /dev/null +++ b/storage/2023-11-03/table/entities/client.go @@ -0,0 +1,23 @@ +package entities + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/dataplane/storage" +) + +// Client is the base client for Table Storage Shares. +type Client struct { + Client *storage.BaseClient +} + +func NewWithBaseUri(baseUri string) (*Client, error) { + baseClient, err := storage.NewBaseClient(baseUri, componentName, apiVersion) + if err != nil { + return nil, fmt.Errorf("building base client: %+v", err) + } + + return &Client{ + Client: baseClient, + }, nil +} diff --git a/storage/2023-11-03/table/entities/delete.go b/storage/2023-11-03/table/entities/delete.go new file mode 100644 index 0000000..05d09b3 --- /dev/null +++ b/storage/2023-11-03/table/entities/delete.go @@ -0,0 +1,80 @@ +package entities + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type DeleteEntityInput struct { + // When inserting an entity into a table, you must specify values for the PartitionKey and RowKey system properties. 
+ // Together, these properties form the primary key and must be unique within the table. + // Both the PartitionKey and RowKey values must be string values; each key value may be up to 64 KB in size. + // If you are using an integer value for the key value, you should convert the integer to a fixed-width string, + // because they are canonically sorted. For example, you should convert the value 1 to 0000001 to ensure proper sorting. + RowKey string + PartitionKey string +} + +type DeleteEntityResponse struct { + HttpResponse *client.Response +} + +// Delete deletes an existing entity in a table. +func (c Client) Delete(ctx context.Context, tableName string, input DeleteEntityInput) (resp DeleteEntityResponse, err error) { + + if tableName == "" { + return resp, fmt.Errorf("`tableName` cannot be an empty string") + } + + if input.PartitionKey == "" { + return resp, fmt.Errorf("`input.PartitionKey` cannot be an empty string") + } + + if input.RowKey == "" { + return resp, fmt.Errorf("`input.RowKey` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/json", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + OptionsObject: deleteEntitiesOptions{}, + Path: fmt.Sprintf("/%s(PartitionKey='%s', RowKey='%s')", tableName, input.PartitionKey, input.RowKey), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + return +} + +type deleteEntitiesOptions struct{} + +func (d deleteEntitiesOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("Accept", "application/json") + headers.Append("If-Match", "*") + return headers +} + +func (d deleteEntitiesOptions) ToOData() *odata.Query { + return nil +} + +func (d deleteEntitiesOptions) ToQuery() *client.QueryParams { 
+ return nil +} diff --git a/storage/2023-11-03/table/entities/get.go b/storage/2023-11-03/table/entities/get.go new file mode 100644 index 0000000..14cd5ad --- /dev/null +++ b/storage/2023-11-03/table/entities/get.go @@ -0,0 +1,92 @@ +package entities + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type GetEntityInput struct { + PartitionKey string + RowKey string + + // The Level of MetaData which should be returned + MetaDataLevel MetaDataLevel +} + +type GetEntityResponse struct { + HttpResponse *client.Response + + Entity map[string]interface{} +} + +// Get queries entities in a table and includes the $filter and $select options. +func (c Client) Get(ctx context.Context, tableName string, input GetEntityInput) (resp GetEntityResponse, err error) { + if tableName == "" { + return resp, fmt.Errorf("`tableName` cannot be an empty string") + } + + if input.PartitionKey == "" { + return resp, fmt.Errorf("`input.PartitionKey` cannot be an empty string") + } + + if input.RowKey == "" { + return resp, fmt.Errorf("`input.RowKey` cannot be an empty string") + } + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: getEntitiesOptions{ + MetaDataLevel: input.MetaDataLevel, + }, + Path: fmt.Sprintf("/%s(PartitionKey='%s', RowKey='%s')", tableName, input.PartitionKey, input.RowKey), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Body != nil { + err = resp.HttpResponse.Unmarshal(&resp.Entity) + if err != nil { + return resp, fmt.Errorf("unmarshalling response: %+v", err) + } + } + } + return +} + +type getEntitiesOptions struct { + 
MetaDataLevel MetaDataLevel +} + +func (g getEntitiesOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("Accept", fmt.Sprintf("application/json;odata=%s", g.MetaDataLevel)) + headers.Append("DataServiceVersion", "3.0;NetFx") + headers.Append("MaxDataServiceVersion", "3.0;NetFx") + return headers +} + +func (g getEntitiesOptions) ToOData() *odata.Query { + return nil +} + +func (g getEntitiesOptions) ToQuery() *client.QueryParams { + return nil +} diff --git a/storage/2023-11-03/table/entities/insert.go b/storage/2023-11-03/table/entities/insert.go new file mode 100644 index 0000000..315b7ce --- /dev/null +++ b/storage/2023-11-03/table/entities/insert.go @@ -0,0 +1,100 @@ +package entities + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type InsertEntityInput struct { + // The level of MetaData provided for this Entity + MetaDataLevel MetaDataLevel + + // The Entity which should be inserted, by default all values are strings + // To explicitly type a property, specify the appropriate OData data type by setting + // the m:type attribute within the property definition + Entity map[string]interface{} + + // When inserting an entity into a table, you must specify values for the PartitionKey and RowKey system properties. + // Together, these properties form the primary key and must be unique within the table. + // Both the PartitionKey and RowKey values must be string values; each key value may be up to 64 KB in size. + // If you are using an integer value for the key value, you should convert the integer to a fixed-width string, + // because they are canonically sorted. For example, you should convert the value 1 to 0000001 to ensure proper sorting. + RowKey string + PartitionKey string +} + +type InsertResponse struct { + HttpResponse *client.Response +} + +// Insert inserts a new entity into a table. 
+func (c Client) Insert(ctx context.Context, tableName string, input InsertEntityInput) (resp InsertResponse, err error) { + if tableName == "" { + return resp, fmt.Errorf("`tableName` cannot be an empty string") + } + + if input.PartitionKey == "" { + return resp, fmt.Errorf("`input.PartitionKey` cannot be an empty string") + } + + if input.RowKey == "" { + return resp, fmt.Errorf("`input.RowKey` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/json", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + }, + HttpMethod: http.MethodPost, + OptionsObject: insertOptions{ + MetaDataLevel: input.MetaDataLevel, + }, + Path: fmt.Sprintf("/%s", tableName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + input.Entity["PartitionKey"] = input.PartitionKey + input.Entity["RowKey"] = input.RowKey + + err = req.Marshal(&input.Entity) + if err != nil { + return resp, fmt.Errorf("marshalling request: %v", err) + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type insertOptions struct { + MetaDataLevel MetaDataLevel +} + +func (i insertOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("Accept", fmt.Sprintf("application/json;odata=%s", i.MetaDataLevel)) + headers.Append("Prefer", "return-no-content") + return headers +} + +func (i insertOptions) ToOData() *odata.Query { + return nil +} + +func (i insertOptions) ToQuery() *client.QueryParams { + return nil +} diff --git a/storage/2023-11-03/table/entities/insert_or_merge.go b/storage/2023-11-03/table/entities/insert_or_merge.go new file mode 100644 index 0000000..27cae93 --- /dev/null +++ b/storage/2023-11-03/table/entities/insert_or_merge.go @@ -0,0 +1,94 @@ +package entities + +import ( + "context" + "fmt" + "net/http" + + 
"github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type InsertOrMergeEntityInput struct { + // The Entity which should be inserted, by default all values are strings + // To explicitly type a property, specify the appropriate OData data type by setting + // the m:type attribute within the property definition + Entity map[string]interface{} + + // When inserting an entity into a table, you must specify values for the PartitionKey and RowKey system properties. + // Together, these properties form the primary key and must be unique within the table. + // Both the PartitionKey and RowKey values must be string values; each key value may be up to 64 KB in size. + // If you are using an integer value for the key value, you should convert the integer to a fixed-width string, + // because they are canonically sorted. For example, you should convert the value 1 to 0000001 to ensure proper sorting. + RowKey string + PartitionKey string +} + +type InsertOrMergeResponse struct { + HttpResponse *client.Response +} + +// InsertOrMerge updates an existing entity or inserts a new entity if it does not exist in the table. +// Because this operation can insert or update an entity, it is also known as an upsert operation. 
+func (c Client) InsertOrMerge(ctx context.Context, tableName string, input InsertOrMergeEntityInput) (resp InsertOrMergeResponse, err error) { + if tableName == "" { + return resp, fmt.Errorf("`tableName` cannot be an empty string") + } + + if input.PartitionKey == "" { + return resp, fmt.Errorf("`input.PartitionKey` cannot be an empty string") + } + + if input.RowKey == "" { + return resp, fmt.Errorf("`input.RowKey` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/json", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + }, + HttpMethod: "MERGE", + OptionsObject: insertOrMergeOptions{}, + Path: fmt.Sprintf("/%s(PartitionKey='%s', RowKey='%s')", tableName, input.PartitionKey, input.RowKey), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + input.Entity["PartitionKey"] = input.PartitionKey + input.Entity["RowKey"] = input.RowKey + + err = req.Marshal(&input.Entity) + if err != nil { + return resp, fmt.Errorf("marshalling request: %v", err) + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type insertOrMergeOptions struct{} + +func (i insertOrMergeOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("Accept", "application/json") + headers.Append("Prefer", "return-no-content") + return headers +} + +func (i insertOrMergeOptions) ToOData() *odata.Query { + return nil +} + +func (i insertOrMergeOptions) ToQuery() *client.QueryParams { + return nil +} diff --git a/storage/2023-11-03/table/entities/insert_or_replace.go b/storage/2023-11-03/table/entities/insert_or_replace.go new file mode 100644 index 0000000..6f97eb6 --- /dev/null +++ b/storage/2023-11-03/table/entities/insert_or_replace.go @@ -0,0 +1,94 @@ +package entities + +import ( + "context" + "fmt" + "net/http" + + 
"github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type InsertOrReplaceEntityInput struct { + // The Entity which should be inserted, by default all values are strings + // To explicitly type a property, specify the appropriate OData data type by setting + // the m:type attribute within the property definition + Entity map[string]interface{} + + // When inserting an entity into a table, you must specify values for the PartitionKey and RowKey system properties. + // Together, these properties form the primary key and must be unique within the table. + // Both the PartitionKey and RowKey values must be string values; each key value may be up to 64 KB in size. + // If you are using an integer value for the key value, you should convert the integer to a fixed-width string, + // because they are canonically sorted. For example, you should convert the value 1 to 0000001 to ensure proper sorting. + RowKey string + PartitionKey string +} + +type InsertOrReplaceResponse struct { + HttpResponse *client.Response +} + +// InsertOrReplace replaces an existing entity or inserts a new entity if it does not exist in the table. +// Because this operation can insert or update an entity, it is also known as an upsert operation. 
+func (c Client) InsertOrReplace(ctx context.Context, tableName string, input InsertOrReplaceEntityInput) (resp InsertOrReplaceResponse, err error) {
+	if tableName == "" {
+		return resp, fmt.Errorf("`tableName` cannot be an empty string")
+	}
+
+	if input.PartitionKey == "" {
+		return resp, fmt.Errorf("`input.PartitionKey` cannot be an empty string")
+	}
+
+	if input.RowKey == "" {
+		return resp, fmt.Errorf("`input.RowKey` cannot be an empty string")
+	}
+
+	opts := client.RequestOptions{
+		ContentType: "application/json",
+		ExpectedStatusCodes: []int{
+			http.StatusNoContent,
+		},
+		HttpMethod:    http.MethodPut,
+		OptionsObject: insertOrReplaceOptions{},
+		Path:          fmt.Sprintf("/%s(PartitionKey='%s', RowKey='%s')", tableName, input.PartitionKey, input.RowKey),
+	}
+
+	req, err := c.Client.NewRequest(ctx, opts)
+	if err != nil {
+		err = fmt.Errorf("building request: %+v", err)
+		return
+	}
+
+	input.Entity["PartitionKey"] = input.PartitionKey
+	input.Entity["RowKey"] = input.RowKey
+
+	err = req.Marshal(&input.Entity)
+	if err != nil {
+		return resp, fmt.Errorf("marshalling request: %v", err)
+	}
+
+	resp.HttpResponse, err = req.Execute(ctx)
+	if err != nil {
+		err = fmt.Errorf("executing request: %+v", err)
+		return
+	}
+
+	return
+}
+
+type insertOrReplaceOptions struct{}
+
+func (i insertOrReplaceOptions) ToHeaders() *client.Headers {
+	headers := &client.Headers{}
+	headers.Append("Accept", "application/json")
+	headers.Append("Prefer", "return-no-content")
+	return headers
+}
+
+func (i insertOrReplaceOptions) ToOData() *odata.Query {
+	return nil
+}
+
+func (i insertOrReplaceOptions) ToQuery() *client.QueryParams {
+	return nil
+}
diff --git a/storage/2023-11-03/table/entities/lifecycle_test.go b/storage/2023-11-03/table/entities/lifecycle_test.go
new file mode 100644
index 0000000..51d4bcf
--- /dev/null
+++ b/storage/2023-11-03/table/entities/lifecycle_test.go
@@ -0,0 +1,156 @@
+package entities
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+
"github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/2020-08-04/table/tables" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +var _ StorageTableEntity = Client{} + +func TestEntitiesLifecycle(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + tableName := fmt.Sprintf("table%d", testhelpers.RandomInt()) + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + tablesClient, err := tables.NewWithBaseUri(fmt.Sprintf("https://%s.%s.%s", accountName, "table", *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(tablesClient.Client, testData, auth.SharedKeyTable); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + t.Logf("[DEBUG] Creating Table..") + if _, err := tablesClient.Create(ctx, tableName); err != nil { + t.Fatalf("Error creating Table %q: %s", tableName, err) + } + defer tablesClient.Delete(ctx, tableName) + + entitiesClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.%s.%s", accountName, "table", *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(entitiesClient.Client, testData, auth.SharedKeyTable); err != nil { + 
t.Fatalf("adding authorizer to client: %+v", err) + } + + partitionKey := "hello" + rowKey := "there" + + t.Logf("[DEBUG] Inserting..") + insertInput := InsertEntityInput{ + MetaDataLevel: NoMetaData, + PartitionKey: partitionKey, + RowKey: rowKey, + Entity: map[string]interface{}{ + "hello": "world", + }, + } + if _, err := entitiesClient.Insert(ctx, tableName, insertInput); err != nil { + t.Logf("Error retrieving: %s", err) + } + + t.Logf("[DEBUG] Insert or Merging..") + insertOrMergeInput := InsertOrMergeEntityInput{ + PartitionKey: partitionKey, + RowKey: rowKey, + Entity: map[string]interface{}{ + "hello": "ther88e", + }, + } + if _, err := entitiesClient.InsertOrMerge(ctx, tableName, insertOrMergeInput); err != nil { + t.Logf("Error insert/merging: %s", err) + } + + t.Logf("[DEBUG] Insert or Replacing..") + insertOrReplaceInput := InsertOrReplaceEntityInput{ + PartitionKey: partitionKey, + RowKey: rowKey, + Entity: map[string]interface{}{ + "hello": "pandas", + }, + } + if _, err := entitiesClient.InsertOrReplace(ctx, tableName, insertOrReplaceInput); err != nil { + t.Logf("Error inserting/replacing: %s", err) + } + + t.Logf("[DEBUG] Querying..") + queryInput := QueryEntitiesInput{ + MetaDataLevel: NoMetaData, + } + results, err := entitiesClient.Query(ctx, tableName, queryInput) + if err != nil { + t.Logf("Error querying: %s", err) + } + + if len(results.Entities) != 1 { + t.Fatalf("Expected 1 item but got %d", len(results.Entities)) + } + + for _, v := range results.Entities { + thisPartitionKey := v["PartitionKey"].(string) + thisRowKey := v["RowKey"].(string) + if partitionKey != thisPartitionKey { + t.Fatalf("Expected Partition Key to be %q but got %q", partitionKey, thisPartitionKey) + } + if rowKey != thisRowKey { + t.Fatalf("Expected Partition Key to be %q but got %q", rowKey, thisRowKey) + } + } + + t.Logf("[DEBUG] Retrieving..") + getInput := GetEntityInput{ + MetaDataLevel: MinimalMetaData, + PartitionKey: partitionKey, + RowKey: rowKey, + } + 
getResults, err := entitiesClient.Get(ctx, tableName, getInput) + if err != nil { + t.Logf("Error querying: %s", err) + } + + partitionKey2 := getResults.Entity["PartitionKey"].(string) + rowKey2 := getResults.Entity["RowKey"].(string) + if partitionKey2 != partitionKey { + t.Fatalf("Expected Partition Key to be %q but got %q", partitionKey, partitionKey2) + } + if rowKey2 != rowKey { + t.Fatalf("Expected Row Key to be %q but got %q", rowKey, rowKey2) + } + + t.Logf("[DEBUG] Deleting..") + deleteInput := DeleteEntityInput{ + PartitionKey: partitionKey, + RowKey: rowKey, + } + if _, err := entitiesClient.Delete(ctx, tableName, deleteInput); err != nil { + t.Logf("Error deleting: %s", err) + } +} diff --git a/storage/2023-11-03/table/entities/models.go b/storage/2023-11-03/table/entities/models.go new file mode 100644 index 0000000..e3c6ccc --- /dev/null +++ b/storage/2023-11-03/table/entities/models.go @@ -0,0 +1,9 @@ +package entities + +type MetaDataLevel string + +var ( + NoMetaData MetaDataLevel = "nometadata" + MinimalMetaData MetaDataLevel = "minimalmetadata" + FullMetaData MetaDataLevel = "fullmetadata" +) diff --git a/storage/2023-11-03/table/entities/query.go b/storage/2023-11-03/table/entities/query.go new file mode 100644 index 0000000..9137401 --- /dev/null +++ b/storage/2023-11-03/table/entities/query.go @@ -0,0 +1,142 @@ +package entities + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type QueryEntitiesInput struct { + // An optional OData filter + Filter *string + + // An optional comma-separated + PropertyNamesToSelect *[]string + + // An optional OData top + Top *int + + PartitionKey string + RowKey string + + // The Level of MetaData which should be returned + MetaDataLevel MetaDataLevel + + // The Next Partition Key used to load data from a previous point + NextPartitionKey *string + + // The Next Row Key used to load data 
from a previous point + NextRowKey *string +} + +type QueryEntitiesResponse struct { + HttpResponse *client.Response + + NextPartitionKey string + NextRowKey string + + MetaData string `json:"odata.metadata,omitempty"` + Entities []map[string]interface{} `json:"value"` +} + +// Query queries entities in a table and includes the $filter and $select options. +func (c Client) Query(ctx context.Context, tableName string, input QueryEntitiesInput) (resp QueryEntitiesResponse, err error) { + if tableName == "" { + return resp, fmt.Errorf("`tableName` cannot be an empty string") + } + + additionalParameters := make([]string, 0) + if input.PartitionKey != "" { + additionalParameters = append(additionalParameters, "PartitionKey='%s'", input.PartitionKey) + } + + if input.RowKey != "" { + additionalParameters = append(additionalParameters, "RowKey='%s'", input.RowKey) + } + + path := fmt.Sprintf("/%s", tableName) + if len(additionalParameters) > 0 { + path += fmt.Sprintf("(%s)", strings.Join(additionalParameters, ",")) + } + + opts := client.RequestOptions{ + ContentType: "application/json", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: queryOptions{ + input: input, + }, + Path: path, + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Body != nil { + err = resp.HttpResponse.Unmarshal(&resp) + if err != nil { + return resp, fmt.Errorf("unmarshalling response: %v", err) + } + } + } + + return +} + +type queryOptions struct { + input QueryEntitiesInput +} + +func (q queryOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("Accept", fmt.Sprintf("application/json;odata=%s", q.input.MetaDataLevel)) + 
headers.Append("DataServiceVersion", "3.0;NetFx") + headers.Append("MaxDataServiceVersion", "3.0;NetFx") + return headers +} + +func (q queryOptions) ToOData() *odata.Query { + return nil +} + +func (q queryOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + + if q.input.Filter != nil { + out.Append("$filter", *q.input.Filter) + } + + if q.input.PropertyNamesToSelect != nil { + out.Append("$select", strings.Join(*q.input.PropertyNamesToSelect, ",")) + } + + if q.input.Top != nil { + out.Append("$top", strconv.Itoa(*q.input.Top)) + } + + if q.input.NextPartitionKey != nil { + out.Append("NextPartitionKey", *q.input.NextPartitionKey) + } + + if q.input.NextRowKey != nil { + out.Append("NextRowKey", *q.input.NextRowKey) + } + return out +} diff --git a/storage/2023-11-03/table/entities/version.go b/storage/2023-11-03/table/entities/version.go new file mode 100644 index 0000000..5eae649 --- /dev/null +++ b/storage/2023-11-03/table/entities/version.go @@ -0,0 +1,4 @@ +package entities + +const apiVersion = "2023-11-03" +const componentName = "table/entities" diff --git a/storage/2023-11-03/table/tables/README.md b/storage/2023-11-03/table/tables/README.md new file mode 100644 index 0000000..40ec349 --- /dev/null +++ b/storage/2023-11-03/table/tables/README.md @@ -0,0 +1,39 @@ +## Table Storage Tables SDK for API version 2020-08-04 + +This package allows you to interact with the Tables Table Storage API + +### Supported Authorizers + +* SharedKeyLite (Table) + +### Example Usage + +```go +package main + +import ( + "context" + "fmt" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/tombuildsstuff/giovanni/storage/2020-08-04/table/tables" +) + +func Example() error { + accountName := "storageaccount1" + storageAccountKey := "ABC123...." 
+ tableName := "mytable" + + storageAuth := autorest.NewSharedKeyLiteTableAuthorizer(accountName, storageAccountKey) + tablesClient := tables.New() + tablesClient.Client.Authorizer = storageAuth + + ctx := context.TODO() + if _, err := tablesClient.Insert(ctx, accountName, tableName); err != nil { + return fmt.Errorf("Error creating Table: %s", err) + } + + return nil +} +``` \ No newline at end of file diff --git a/storage/2023-11-03/table/tables/acl_get.go b/storage/2023-11-03/table/tables/acl_get.go new file mode 100644 index 0000000..d927d38 --- /dev/null +++ b/storage/2023-11-03/table/tables/acl_get.go @@ -0,0 +1,73 @@ +package tables + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type GetACLResponse struct { + HttpResponse *client.Response + + SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"` +} + +// GetACL returns the Access Control List for the specified Table +func (c Client) GetACL(ctx context.Context, tableName string) (resp GetACLResponse, err error) { + + if tableName == "" { + return resp, fmt.Errorf("`tableName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: getAclTableOptions{}, + Path: fmt.Sprintf("/%s", tableName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Body != nil { + err = resp.HttpResponse.Unmarshal(&resp) + if err != nil { + return resp, fmt.Errorf("unmarshalling response body: %v", err) + } + } + } + + return +} + +type getAclTableOptions struct{} + +func (g getAclTableOptions) ToHeaders() 
*client.Headers { + return nil +} + +func (g getAclTableOptions) ToOData() *odata.Query { + return nil +} + +func (g getAclTableOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "acl") + return out +} diff --git a/storage/2023-11-03/table/tables/acl_set.go b/storage/2023-11-03/table/tables/acl_set.go new file mode 100644 index 0000000..9ff4ad0 --- /dev/null +++ b/storage/2023-11-03/table/tables/acl_set.go @@ -0,0 +1,73 @@ +package tables + +import ( + "context" + "encoding/xml" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type setAcl struct { + SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"` + + XMLName xml.Name `xml:"SignedIdentifiers"` +} + +type SetACLResponse struct { + HttpResponse *client.Response +} + +// SetACL sets the specified Access Control List for the specified Table +func (c Client) SetACL(ctx context.Context, tableName string, acls []SignedIdentifier) (resp SetACLResponse, err error) { + if tableName == "" { + return resp, fmt.Errorf("`tableName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/xml; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + }, + HttpMethod: http.MethodPut, + OptionsObject: setAclTableOptions{}, + Path: fmt.Sprintf("/%s", tableName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + err = req.Marshal(setAcl{SignedIdentifiers: acls}) + if err != nil { + return resp, fmt.Errorf("marshalling request: %+v", err) + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type setAclTableOptions struct{} + +func (s setAclTableOptions) ToHeaders() *client.Headers { + return nil +} + +func (s setAclTableOptions) ToOData() *odata.Query { + return nil +} + +func 
(s setAclTableOptions) ToQuery() *client.QueryParams { + out := &client.QueryParams{} + out.Append("comp", "acl") + return out +} diff --git a/storage/2023-11-03/table/tables/api.go b/storage/2023-11-03/table/tables/api.go new file mode 100644 index 0000000..75c1de6 --- /dev/null +++ b/storage/2023-11-03/table/tables/api.go @@ -0,0 +1,15 @@ +package tables + +import ( + "context" +) + +type StorageTable interface { + Delete(ctx context.Context, tableName string) (resp DeleteTableResponse, err error) + Exists(ctx context.Context, tableName string) (resp TableExistsResponse, err error) + GetACL(ctx context.Context, tableName string) (resp GetACLResponse, err error) + Create(ctx context.Context, tableName string) (resp CreateTableResponse, err error) + GetResourceManagerResourceID(subscriptionID, resourceGroup, accountName, tableName string) string + Query(ctx context.Context, input QueryInput) (resp GetResponse, err error) + SetACL(ctx context.Context, tableName string, acls []SignedIdentifier) (resp SetACLResponse, err error) +} diff --git a/storage/2023-11-03/table/tables/client.go b/storage/2023-11-03/table/tables/client.go new file mode 100644 index 0000000..31e53c5 --- /dev/null +++ b/storage/2023-11-03/table/tables/client.go @@ -0,0 +1,23 @@ +package tables + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/dataplane/storage" +) + +// Client is the base client for Table Storage Shares. 
+type Client struct { + Client *storage.BaseClient +} + +func NewWithBaseUri(baseUri string) (*Client, error) { + baseClient, err := storage.NewBaseClient(baseUri, componentName, apiVersion) + if err != nil { + return nil, fmt.Errorf("building base client: %+v", err) + } + + return &Client{ + Client: baseClient, + }, nil +} diff --git a/storage/2023-11-03/table/tables/create.go b/storage/2023-11-03/table/tables/create.go new file mode 100644 index 0000000..feeebe4 --- /dev/null +++ b/storage/2023-11-03/table/tables/create.go @@ -0,0 +1,71 @@ +package tables + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type createTableRequest struct { + TableName string `json:"TableName"` +} + +type CreateTableResponse struct { + HttpResponse *client.Response +} + +// Create creates a new table in the storage account. +func (c Client) Create(ctx context.Context, tableName string) (resp CreateTableResponse, err error) { + if tableName == "" { + return resp, fmt.Errorf("`tableName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/json", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + }, + HttpMethod: http.MethodPost, + OptionsObject: createTableOptions{}, + Path: "/Tables", + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + err = req.Marshal(&createTableRequest{TableName: tableName}) + if err != nil { + return resp, fmt.Errorf("marshalling request") + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type createTableOptions struct{} + +func (c createTableOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("Accept", "application/json;odata=nometadata") + headers.Append("Prefer", "return-no-content") + return headers +} 
+ +func (c createTableOptions) ToOData() *odata.Query { + return nil +} + +func (c createTableOptions) ToQuery() *client.QueryParams { + return nil +} diff --git a/storage/2023-11-03/table/tables/delete.go b/storage/2023-11-03/table/tables/delete.go new file mode 100644 index 0000000..3a2817f --- /dev/null +++ b/storage/2023-11-03/table/tables/delete.go @@ -0,0 +1,62 @@ +package tables + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type DeleteTableResponse struct { + HttpResponse *client.Response +} + +// Delete deletes the specified table and any data it contains. +func (c Client) Delete(ctx context.Context, tableName string) (resp DeleteTableResponse, err error) { + if tableName == "" { + return resp, fmt.Errorf("`tableName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/json", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + OptionsObject: deleteOptions{}, + Path: fmt.Sprintf("/Tables('%s')", tableName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type deleteOptions struct { +} + +func (d deleteOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("Accept", "application/json") + return headers +} + +func (d deleteOptions) ToOData() *odata.Query { + return nil +} + +func (d deleteOptions) ToQuery() *client.QueryParams { + return nil +} diff --git a/storage/2023-11-03/table/tables/exists.go b/storage/2023-11-03/table/tables/exists.go new file mode 100644 index 0000000..1497695 --- /dev/null +++ b/storage/2023-11-03/table/tables/exists.go @@ -0,0 +1,66 @@ +package tables + +import ( + "context" + "fmt" + 
"net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type TableExistsResponse struct { + HttpResponse *client.Response +} + +// Exists checks that the specified table exists +func (c Client) Exists(ctx context.Context, tableName string) (resp TableExistsResponse, err error) { + if tableName == "" { + return resp, fmt.Errorf("`tableName` cannot be an empty string") + } + + opts := client.RequestOptions{ + ContentType: "application/json", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: tableExistsOptions{}, + Path: fmt.Sprintf("/Tables('%s')", tableName), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + err = req.Marshal(&createTableRequest{TableName: tableName}) + if err != nil { + return resp, fmt.Errorf("marshalling request") + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + return +} + +type tableExistsOptions struct{} + +func (t tableExistsOptions) ToHeaders() *client.Headers { + headers := &client.Headers{} + headers.Append("Accept", "application/json;odata=nometadata") + return headers +} + +func (t tableExistsOptions) ToOData() *odata.Query { + return nil +} + +func (t tableExistsOptions) ToQuery() *client.QueryParams { + return nil +} diff --git a/storage/2023-11-03/table/tables/lifecycle_test.go b/storage/2023-11-03/table/tables/lifecycle_test.go new file mode 100644 index 0000000..c840b3b --- /dev/null +++ b/storage/2023-11-03/table/tables/lifecycle_test.go @@ -0,0 +1,127 @@ +package tables + +import ( + "context" + "fmt" + "log" + "testing" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01/storageaccounts" + "github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/tombuildsstuff/giovanni/storage/internal/testhelpers" +) + +var _ 
StorageTable = Client{} + +func TestTablesLifecycle(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + client, err := testhelpers.Build(ctx, t) + if err != nil { + t.Fatal(err) + } + + resourceGroup := fmt.Sprintf("acctestrg-%d", testhelpers.RandomInt()) + accountName := fmt.Sprintf("acctestsa%s", testhelpers.RandomString()) + tableName := fmt.Sprintf("table%d", testhelpers.RandomInt()) + + testData, err := client.BuildTestResources(ctx, resourceGroup, accountName, storageaccounts.KindStorage) + if err != nil { + t.Fatal(err) + } + defer client.DestroyTestResources(ctx, resourceGroup, accountName) + + domainSuffix, ok := client.Environment.Storage.DomainSuffix() + if !ok { + t.Fatalf("storage didn't return a domain suffix for this environment") + } + tablesClient, err := NewWithBaseUri(fmt.Sprintf("https://%s.%s.%s", accountName, "table", *domainSuffix)) + if err != nil { + t.Fatalf("building client for environment: %+v", err) + } + + if err := client.PrepareWithSharedKeyAuth(tablesClient.Client, testData, auth.SharedKeyTable); err != nil { + t.Fatalf("adding authorizer to client: %+v", err) + } + + t.Logf("[DEBUG] Creating Table..") + if _, err := tablesClient.Create(ctx, tableName); err != nil { + t.Fatalf("Error creating Table %q: %s", tableName, err) + } + + // first look it up directly and confirm it's there + t.Logf("[DEBUG] Checking if Table exists..") + if _, err := tablesClient.Exists(ctx, tableName); err != nil { + t.Fatalf("Error checking if Table %q exists: %s", tableName, err) + } + + // then confirm it exists in the Query too + t.Logf("[DEBUG] Querying for Tables..") + result, err := tablesClient.Query(ctx, QueryInput{MetaDataLevel: NoMetaData}) + if err != nil { + t.Fatalf("Error retrieving Tables: %s", err) + } + found := false + for _, v := range result.Tables { + log.Printf("[DEBUG] Table: %q", v.TableName) + + if v.TableName == tableName { + found = true + } + } + if !found { + 
t.Fatalf("%q was not found in the Query response!", tableName) + } + + t.Logf("[DEBUG] Setting ACL's for Table %q..", tableName) + acls := []SignedIdentifier{ + { + Id: "MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI=", + AccessPolicy: AccessPolicy{ + Permission: "raud", + Start: "2020-11-26T08:49:37.0000000Z", + Expiry: "2020-11-27T08:49:37.0000000Z", + }, + }, + } + if _, err := tablesClient.SetACL(ctx, tableName, acls); err != nil { + t.Fatalf("Error setting ACLs: %s", err) + } + + t.Logf("[DEBUG] Retrieving ACL's for Table %q..", tableName) + retrievedACLs, err := tablesClient.GetACL(ctx, tableName) + if err != nil { + t.Fatalf("Error retrieving ACLs: %s", err) + } + + if len(retrievedACLs.SignedIdentifiers) != len(acls) { + t.Fatalf("Expected %d but got %q ACLs", len(acls), len(retrievedACLs.SignedIdentifiers)) + } + + for i, retrievedAcl := range retrievedACLs.SignedIdentifiers { + expectedAcl := acls[i] + + if retrievedAcl.Id != expectedAcl.Id { + t.Fatalf("Expected ID to be %q but got %q", expectedAcl.Id, retrievedAcl.Id) + } + + if retrievedAcl.AccessPolicy.Start != expectedAcl.AccessPolicy.Start { + t.Fatalf("Expected Start to be %q but got %q", expectedAcl.AccessPolicy.Start, retrievedAcl.AccessPolicy.Start) + } + + if retrievedAcl.AccessPolicy.Expiry != expectedAcl.AccessPolicy.Expiry { + t.Fatalf("Expected Expiry to be %q but got %q", expectedAcl.AccessPolicy.Expiry, retrievedAcl.AccessPolicy.Expiry) + } + + if retrievedAcl.AccessPolicy.Permission != expectedAcl.AccessPolicy.Permission { + t.Fatalf("Expected Permission to be %q but got %q", expectedAcl.AccessPolicy.Permission, retrievedAcl.AccessPolicy.Permission) + } + } + + t.Logf("[DEBUG] Deleting Table %q..", tableName) + if _, err := tablesClient.Delete(ctx, tableName); err != nil { + t.Fatalf("Error deleting %q: %s", tableName, err) + } +} diff --git a/storage/2023-11-03/table/tables/models.go b/storage/2023-11-03/table/tables/models.go new file mode 100644 index 0000000..d7c382a --- /dev/null +++ 
b/storage/2023-11-03/table/tables/models.go @@ -0,0 +1,29 @@ +package tables + +type MetaDataLevel string + +var ( + NoMetaData MetaDataLevel = "nometadata" + MinimalMetaData MetaDataLevel = "minimalmetadata" + FullMetaData MetaDataLevel = "fullmetadata" +) + +type GetResultItem struct { + TableName string `json:"TableName"` + + // Optional, depending on the MetaData Level + ODataType string `json:"odata.type,omitempty"` + ODataID string `json:"odata.id,omitEmpty"` + ODataEditLink string `json:"odata.editLink,omitEmpty"` +} + +type SignedIdentifier struct { + Id string `xml:"Id"` + AccessPolicy AccessPolicy `xml:"AccessPolicy"` +} + +type AccessPolicy struct { + Start string `xml:"Start"` + Expiry string `xml:"Expiry"` + Permission string `xml:"Permission"` +} diff --git a/storage/2023-11-03/table/tables/query.go b/storage/2023-11-03/table/tables/query.go new file mode 100644 index 0000000..9a15de8 --- /dev/null +++ b/storage/2023-11-03/table/tables/query.go @@ -0,0 +1,80 @@ +package tables + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +type GetResponse struct { + HttpResponse *client.Response + + MetaData string `json:"odata.metadata,omitempty"` + Tables []GetResultItem `json:"value"` +} + +type QueryInput struct { + MetaDataLevel MetaDataLevel +} + +// Query returns a list of tables under the specified account. 
+func (c Client) Query(ctx context.Context, input QueryInput) (resp GetResponse, err error) { + + opts := client.RequestOptions{ + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: queryOptions{ + metaDataLevel: input.MetaDataLevel, + }, + Path: "/Tables", + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + err = fmt.Errorf("building request: %+v", err) + return + } + + resp.HttpResponse, err = req.Execute(ctx) + if err != nil { + err = fmt.Errorf("executing request: %+v", err) + return + } + + if resp.HttpResponse != nil { + if resp.HttpResponse.Body != nil { + err = resp.HttpResponse.Unmarshal(&resp) + if err != nil { + return resp, fmt.Errorf("unmarshalling response: %v", err) + } + } + } + + return +} + +type queryOptions struct { + metaDataLevel MetaDataLevel +} + +func (q queryOptions) ToHeaders() *client.Headers { + // NOTE: whilst this supports ContinuationTokens and 'Top' + // it appears that 'Skip' returns a '501 Not Implemented' + // as such, we intentionally don't support those right now + headers := &client.Headers{} + headers.Append("Accept", fmt.Sprintf("application/json;odata=%s", q.metaDataLevel)) + return headers +} + +func (q queryOptions) ToOData() *odata.Query { + return nil +} + +func (q queryOptions) ToQuery() *client.QueryParams { + return nil +} diff --git a/storage/2023-11-03/table/tables/resource_id.go b/storage/2023-11-03/table/tables/resource_id.go new file mode 100644 index 0000000..7b872b9 --- /dev/null +++ b/storage/2023-11-03/table/tables/resource_id.go @@ -0,0 +1,10 @@ +package tables + +import "fmt" + +// GetResourceManagerResourceID returns the Resource ID for the given Table +// This can be useful when, for example, you're using this as a unique identifier +func (c Client) GetResourceManagerResourceID(subscriptionID, resourceGroup, accountName, tableName string) string { + fmtStr := 
"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Storage/storageAccounts/%s/tableServices/default/tables/%s" + return fmt.Sprintf(fmtStr, subscriptionID, resourceGroup, accountName, tableName) +} diff --git a/storage/2023-11-03/table/tables/resource_id_test.go b/storage/2023-11-03/table/tables/resource_id_test.go new file mode 100644 index 0000000..7cb3d87 --- /dev/null +++ b/storage/2023-11-03/table/tables/resource_id_test.go @@ -0,0 +1,13 @@ +package tables + +import ( + "testing" +) + +func TestGetResourceManagerResourceID(t *testing.T) { + actual := Client{}.GetResourceManagerResourceID("11112222-3333-4444-5555-666677778888", "group1", "account1", "table1") + expected := "/subscriptions/11112222-3333-4444-5555-666677778888/resourceGroups/group1/providers/Microsoft.Storage/storageAccounts/account1/tableServices/default/tables/table1" + if actual != expected { + t.Fatalf("Expected the Resource Manager Resource ID to be %q but got %q", expected, actual) + } +} diff --git a/storage/2023-11-03/table/tables/version.go b/storage/2023-11-03/table/tables/version.go new file mode 100644 index 0000000..63c317f --- /dev/null +++ b/storage/2023-11-03/table/tables/version.go @@ -0,0 +1,4 @@ +package tables + +const apiVersion = "2023-11-03" +const componentName = "table/tables"