From 6d71d326812d94fdbf90f59917f733d8b584b1b9 Mon Sep 17 00:00:00 2001 From: Aparna Singh Date: Wed, 27 Mar 2024 12:15:35 +0530 Subject: [PATCH] Handling PoolSizeTooSmall error while creating ANF volumes --- frontend/csi/controller_server.go | 1 - .../mock_azure/mock_api.go | 8 +- storage_drivers/azure/api/azure.go | 271 ++++++++++-- storage_drivers/azure/api/azure_test.go | 410 ++++++++++++++++++ storage_drivers/azure/api/types.go | 2 +- storage_drivers/azure/azure_anf.go | 21 +- storage_drivers/azure/azure_anf_subvolume.go | 93 ++-- .../azure/azure_anf_subvolume_test.go | 12 +- storage_drivers/azure/azure_anf_test.go | 142 +++--- 9 files changed, 792 insertions(+), 168 deletions(-) diff --git a/frontend/csi/controller_server.go b/frontend/csi/controller_server.go index 479290a8e..535b3c69e 100644 --- a/frontend/csi/controller_server.go +++ b/frontend/csi/controller_server.go @@ -263,7 +263,6 @@ func (p *Plugin) CreateVolume( } if err != nil { - p.controllerHelper.RecordVolumeEvent(ctx, req.Name, controllerhelpers.EventTypeNormal, "ProvisioningFailed", err.Error()) return nil, p.getCSIErrorForOrchestratorError(err) } else { p.controllerHelper.RecordVolumeEvent(ctx, req.Name, v1.EventTypeNormal, "ProvisioningSuccess", "provisioned a volume") diff --git a/mocks/mock_storage_drivers/mock_azure/mock_api.go b/mocks/mock_storage_drivers/mock_azure/mock_api.go index 129ac67cf..540ce65f2 100644 --- a/mocks/mock_storage_drivers/mock_azure/mock_api.go +++ b/mocks/mock_storage_drivers/mock_azure/mock_api.go @@ -661,16 +661,16 @@ func (mr *MockAzureMockRecorder) WaitForSubvolumeState(arg0, arg1, arg2, arg3, a } // WaitForVolumeState mocks base method. -func (m *MockAzure) WaitForVolumeState(arg0 context.Context, arg1 *api.FileSystem, arg2 string, arg3 []string, arg4 time.Duration) (string, error) { +func (m *MockAzure) WaitForVolumeState(arg0 context.Context, arg1 *api.FileSystem, arg2 string, arg3 []string, arg4 time.Duration, arg5 api.Operation) (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WaitForVolumeState", arg0, arg1, arg2, arg3, arg4) + ret := m.ctrl.Call(m, "WaitForVolumeState", arg0, arg1, arg2, arg3, arg4, arg5) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // WaitForVolumeState indicates an expected call of WaitForVolumeState. -func (mr *MockAzureMockRecorder) WaitForVolumeState(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockAzureMockRecorder) WaitForVolumeState(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForVolumeState", reflect.TypeOf((*MockAzure)(nil).WaitForVolumeState), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForVolumeState", reflect.TypeOf((*MockAzure)(nil).WaitForVolumeState), arg0, arg1, arg2, arg3, arg4, arg5) } diff --git a/storage_drivers/azure/api/azure.go b/storage_drivers/azure/api/azure.go index 8cf9940f5..596a86b6d 100644 --- a/storage_drivers/azure/api/azure.go +++ b/storage_drivers/azure/api/azure.go @@ -20,6 +20,7 @@ import ( resourcegraph "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph" features "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armfeatures" "github.com/cenkalti/backoff/v4" + "go.uber.org/multierr" "sigs.k8s.io/cloud-provider-azure/pkg/azclient" . "github.com/netapp/trident/logging" @@ -39,6 +40,7 @@ const ( SDKMaxRetryDelay = 15 * time.Second CorrelationIDHeader = "X-Ms-Correlation-Request-Id" SubvolumeNameSeparator = "-file-" + PoolSizeTooSmallError = "PoolSizeTooSmall" ) var ( @@ -48,6 +50,7 @@ var ( snapshotIDRegex = regexp.MustCompile(`^/subscriptions/(?P[^/]+)/resourceGroups/(?P[^/]+)/providers/(?P[^/]+)/netAppAccounts/(?P[^/]+)/capacityPools/(?P[^/]+)/volumes/(?P[^/]+)/snapshots/(?P[^/]+)$`) subvolumeIDRegex = regexp.MustCompile(`^/subscriptions/(?P[^/]+)/resourceGroups/(?P[^/]+)/providers/(?P[^/]+)/netAppAccounts/(?P[^/]+)/capacityPools/(?P[^/]+)/volumes/(?P[^/]+)/subvolumes/(?P[^/]+)$`) subnetIDRegex = regexp.MustCompile(`^/subscriptions/(?P[^/]+)/resourceGroups/(?P[^/]+)/providers/(?P[^/]+)/virtualNetworks/(?P[^/]+)/subnets/(?P[^/]+)$`) + VolumePollerCache = AzurePollerResponseCache{pollerResponseMap: make(map[PollerKey]PollerResponse)} ) // ClientConfig holds configuration data for the API driver object. @@ -76,17 +79,144 @@ type AzureClient struct { AzureResources } -type PollerSVCreateResponse struct { - *runtime.Poller[netapp.SubvolumesClientCreateResponse] +type AzureError struct { + Id string `json:"id"` + Name string `json:"name"` + Status string `json:"status"` + StartTime string `json:"startTime"` + EndTime string `json:"endTime"` + AzError struct { + Code string `json:"code"` + Message string `json:"message"` + Details []struct { + Code string `json:"code"` + Message string `json:"message"` + } `json:"details"` + } `json:"error"` } -type PollerSVDeleteResponse struct { - *runtime.Poller[netapp.SubvolumesClientDeleteResponse] + +func (e *AzureError) Error() string { + var errMessage error + + if len(e.AzError.Details) > 0 { + for _, d := range e.AzError.Details { + errMessage = multierr.Append( + errMessage, + fmt.Errorf("%v: %v", d.Code, d.Message), + ) + } + } else { + if e.AzError.Message != "" { + errMessage = multierr.Combine( + errMessage, + fmt.Errorf("%v: %v", e.AzError.Code, e.AzError.Message), + ) + } + } + + return errMessage.Error() +} + +type Operation int64 + +const ( + Create Operation = iota + Delete + Update + Restore + Import +) + +type PollerKey struct { + ID string + Operation Operation } type PollerResponse interface { Result(context.Context) error } +type PollerResponseCache interface { + Put(key *PollerKey, value PollerResponse) error + Get(key PollerKey) (PollerResponse, bool) + Delete(key PollerKey) +} + +type AzurePollerResponseCache struct { + pollerResponseMap map[PollerKey]PollerResponse +} + +func (azPollerCache *AzurePollerResponseCache) Put(key *PollerKey, value PollerResponse) error { + if len(azPollerCache.pollerResponseMap) == 0 { + azPollerCache.pollerResponseMap = make(map[PollerKey]PollerResponse) + } + + if key == nil { + return errors.New("failed to add to poller response cache as nil key is passed") + } + + azPollerCache.pollerResponseMap[*key] = value + + Log().Debugf("Successfully added to azure poller response cache; [key: %v, value: %v]", key, value) + + return nil +} + +func (azPollerCache *AzurePollerResponseCache) Get(key PollerKey) (PollerResponse, bool) { + Log().Debugf("Fetching from azure poller response cache; [key: %v].", key) + + if len(azPollerCache.pollerResponseMap) != 0 { + response, ok := azPollerCache.pollerResponseMap[key] + return response, ok + } + + return nil, false +} + +func (azPollerCache *AzurePollerResponseCache) Delete(key PollerKey) { + if len(azPollerCache.pollerResponseMap) != 0 { + delete(azPollerCache.pollerResponseMap, key) + } + + Log().Debugf("Successfully deleted from azure poller response cache; [key: %v].", key) +} + +type PollerVolumeCreateResponse struct { + *runtime.Poller[netapp.VolumesClientCreateOrUpdateResponse] +} + +func (p *PollerVolumeCreateResponse) Result(ctx context.Context) error { + if p != nil && p.Poller != nil { + Logc(ctx).Debug("Polling for volume create response") + + var rawResponse *http.Response + responseCtx := runtime.WithCaptureResponse(ctx, &rawResponse) + + _, err := p.PollUntilDone(responseCtx, &runtime.PollUntilDoneOptions{Frequency: 2 * time.Second}) + if err != nil { + Logc(ctx).WithError(err).Error("Got error when polling for volume create result.") + + if ok, azErr := IsANFPoolSizeTooSmallError(ctx, err); ok { + Logc(ctx).WithError(azErr).Error("Volume create failed due to low space in capacity pool.") + return errors.ResourceExhaustedError(azErr) + } + + return GetMessageFromError(ctx, err) + } + + Logc(ctx).Debug("Result received for volume create.") + } + + return nil +} + +type PollerSVCreateResponse struct { + *runtime.Poller[netapp.SubvolumesClientCreateResponse] +} +type PollerSVDeleteResponse struct { + *runtime.Poller[netapp.SubvolumesClientDeleteResponse] +} + func (p *PollerSVCreateResponse) Result(ctx context.Context) error { if p != nil && p.Poller != nil { var rawResponse *http.Response @@ -839,7 +969,6 @@ func (c Client) VolumeByID(ctx context.Context, id string) (*FileSystem, error) } Logc(ctx).WithFields(logFields).Debug("Found volume by ID.") - return c.newFileSystemFromVolume(ctx, &response.Volume) } @@ -859,9 +988,13 @@ func (c Client) VolumeExistsByID(ctx context.Context, id string) (bool, *FileSys // WaitForVolumeState watches for a desired volume state and returns when that state is achieved. func (c Client) WaitForVolumeState( ctx context.Context, filesystem *FileSystem, desiredState string, abortStates []string, - maxElapsedTime time.Duration, + maxElapsedTime time.Duration, operation Operation, ) (string, error) { volumeState := "" + pollerKey := PollerKey{ + ID: filesystem.CreationToken, + Operation: operation, + } checkVolumeState := func() error { f, err := c.VolumeByID(ctx, filesystem.ID) @@ -920,6 +1053,18 @@ func (c Client) WaitForVolumeState( if err := backoff.RetryNotify(checkVolumeState, stateBackoff, stateNotify); err != nil { if IsTerminalStateError(err) { Logc(ctx).WithError(err).Error("Volume reached terminal state.") + + // If a poller object exists for this volume in the VolumePollerCache cache, + // then fetch error details using the poller object, and then clear the cache entry + if poller, ok := VolumePollerCache.Get(pollerKey); ok { + if pollError := poller.Result(ctx); pollError != nil { + Logc(ctx).WithError(pollError).Errorf("Failed to create volume: %v; poller returned an error", filesystem.CreationToken) + err = pollError + } + + // Clear the cache + VolumePollerCache.Delete(pollerKey) + } } else { Logc(ctx).Warningf("Volume state was not %s after %3.2f seconds.", desiredState, stateBackoff.MaxElapsedTime.Seconds()) @@ -927,6 +1072,9 @@ func (c Client) WaitForVolumeState( return volumeState, err } + // If desired state is reached, then clear the poller response from the cache + VolumePollerCache.Delete(pollerKey) + Logc(ctx).WithField("desiredState", desiredState).Debug("Desired volume state reached.") return volumeState, nil @@ -1006,7 +1154,7 @@ func (c Client) CreateVolume(ctx context.Context, request *FilesystemCreateReque var rawResponse *http.Response responseCtx := runtime.WithCaptureResponse(ctx, &rawResponse) - _, err := c.sdkClient.VolumesClient.BeginCreateOrUpdate(responseCtx, + poller, err := c.sdkClient.VolumesClient.BeginCreateOrUpdate(responseCtx, resourceGroup, netappAccount, cPoolName, request.Name, newVol, nil) logFields["correlationID"] = GetCorrelationID(rawResponse) @@ -1018,6 +1166,18 @@ func (c Client) CreateVolume(ctx context.Context, request *FilesystemCreateReque Logc(ctx).WithFields(logFields).Info("Volume create request issued.") + // Store the poller object for this volume create request into a cache + pollerKey := PollerKey{ + ID: request.CreationToken, + Operation: Create, + } + + err = VolumePollerCache.Put(&pollerKey, &PollerVolumeCreateResponse{poller}) + if err != nil { + Logc(ctx).WithError(err).Errorf("Failed to add poller key %v to cache.", pollerKey) + return nil, err + } + // The volume doesn't exist yet, so forge the volume ID to enable conversion to a FileSystem struct newVolID := CreateVolumeID(c.config.SubscriptionID, resourceGroup, netappAccount, cPoolName, request.Name) newVol.ID = &newVolID @@ -2087,7 +2247,7 @@ func (c Client) ValidateFilePoolVolumes( volume.Location) } - if volume.SubvolumesEnabled == false { + if !volume.SubvolumesEnabled { return nil, fmt.Errorf("filePoolVolumes validation failed; volume '%s' does not support subvolumes", filePoolVolumeName) } @@ -2132,6 +2292,27 @@ func IsANFTooManyRequestsError(err error) bool { return false } +// IsANFPoolSizeTooSmallError checks whether an error returned from the ANF SDK contains a PoolSizeToolSmall code +func IsANFPoolSizeTooSmallError(ctx context.Context, inputErr error) (bool, error) { + if inputErr == nil { + return false, nil + } + + var azError *AzureError + err := parseAzureErrorFromInputError(ctx, inputErr) + if ok := errors.As(err, &azError); ok { + if len(azError.AzError.Details) > 0 { + for _, d := range azError.AzError.Details { + if d.Code == PoolSizeTooSmallError { + return true, azError + } + } + } + } + + return false, nil +} + // GetCorrelationIDFromError accepts an error returned from the ANF SDK and extracts the correlation // header, if present. func GetCorrelationIDFromError(err error) (id string) { @@ -2160,44 +2341,64 @@ func GetCorrelationID(response *http.Response) (id string) { return } -// GetMessageFromError accepts an error returned from the ANF SDK and extracts -// the error message. +// GetMessageFromError accepts an error returned from the ANF SDK and returns an appropriate error message. func GetMessageFromError(ctx context.Context, inputErr error) error { - type AzureError struct { - Error struct { - Code string `json:"code"` - Message string `json:"message"` - } `json:"error"` + // If input error is nil, return as is + if inputErr == nil { + return inputErr } - if inputErr == nil { + // Parse AzureError from input error. If not successful, then return input error as is + var azError *AzureError + err := parseAzureErrorFromInputError(ctx, inputErr) + if ok := errors.As(err, &azError); !ok { + Logc(ctx).WithError(err).Error("Failed to get azure error from error.") return inputErr } - if detailedErr, ok := inputErr.(*azcore.ResponseError); ok { - if detailedErr.RawResponse != nil && detailedErr.RawResponse.Body != nil { - bytes, readErr := io.ReadAll(detailedErr.RawResponse.Body) - if readErr != nil { - Logc(ctx).WithError(readErr).Error("Failed to read error body.") - return inputErr - } + return azError +} - var azureError AzureError - unmarshalErr := json.Unmarshal(bytes, &azureError) - if unmarshalErr != nil { - Logc(ctx).WithError(unmarshalErr).Error("Unmarshal error.") - } else { - errCode := azureError.Error.Code - errMessage := azureError.Error.Message +// parseAzureErrorFromInputError accepts an error returned from the ANF SDK and extracts the AzureError +func parseAzureErrorFromInputError(ctx context.Context, inputErr error) error { + if inputErr == nil { + return fmt.Errorf("no input error passed") + } - if errMessage != "" { - return fmt.Errorf("%v: %v", errCode, errMessage) - } - } + fields := LogFields{ + "Method": "getAzureErrorFromError", + "inputError": inputErr.Error(), + } + Logc(ctx).WithFields(fields).Trace(">>>> getAzureErrorFromError") + defer Logc(ctx).WithFields(fields).Trace("<<<< getAzureErrorFromError") + + detailedErr, ok := inputErr.(*azcore.ResponseError) + if !ok { + Logc(ctx).WithError(inputErr).Debug("Input error is not an azure error.") + return fmt.Errorf("input error is not an azure error: %w", inputErr) + } + + if detailedErr.RawResponse != nil && detailedErr.RawResponse.Body != nil { + defer detailedErr.RawResponse.Body.Close() + + bytes, readErr := io.ReadAll(detailedErr.RawResponse.Body) + if readErr != nil { + Logc(ctx).WithError(readErr).Error("Failed to read error body.") + return fmt.Errorf("failed to read error body: %w", readErr) } + + var azureError AzureError + unmarshalErr := json.Unmarshal(bytes, &azureError) + if unmarshalErr != nil { + Logc(ctx).WithError(unmarshalErr).Error("Failed to unmarshal azure error.") + return fmt.Errorf("failed to unmarshal azure error: %w", unmarshalErr) + } + + Logc(ctx).Debug("Successfully unmarshalled AzureError from input error.") + return &azureError } - return inputErr + return fmt.Errorf("azure response or body is nil: %w", inputErr) } // DerefString accepts a string pointer and returns the value of the string, or "" if the pointer is nil. diff --git a/storage_drivers/azure/api/azure_test.go b/storage_drivers/azure/api/azure_test.go index 63afd7d1b..5d9e79ff1 100644 --- a/storage_drivers/azure/api/azure_test.go +++ b/storage_drivers/azure/api/azure_test.go @@ -3,15 +3,146 @@ package api import ( + "context" + "io" "net/http" + "strings" "testing" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/stretchr/testify/assert" "github.com/netapp/trident/utils/errors" ) +func TestAzureError_WithoutDetails(t *testing.T) { + err := &AzureError{ + AzError: struct { + Code string `json:"code"` + Message string `json:"message"` + Details []struct { + Code string `json:"code"` + Message string `json:"message"` + } `json:"details"` + }{ + Code: "BadRequest", + Message: "Failed to create volume as capacity pool is too small", + }, + } + + result := err.Error() + + assert.Equal(t, "BadRequest: Failed to create volume as capacity pool is too small", result) +} + +func TestAzureError_WithDetails(t *testing.T) { + err := &AzureError{ + AzError: struct { + Code string `json:"code"` + Message string `json:"message"` + Details []struct { + Code string `json:"code"` + Message string `json:"message"` + } `json:"details"` + }{ + Code: "BadRequest", + Message: "Failed to create volume as capacity pool is too small", + Details: []struct { + Code string `json:"code"` + Message string `json:"message"` + }([]struct { + Code string + Message string + }{ + {"PoolSizeTooSmall", "Failed to create volume as capacity pool is too small"}, + {"FakeMockError", "Fake mock error"}, + }), + }, + } + + result := err.Error() + + assert.Equal(t, "PoolSizeTooSmall: Failed to create volume as capacity pool is too small; FakeMockError: Fake mock error", result) +} + +func TestVolumePollerCache_Put_NilKey(t *testing.T) { + pollCache := AzurePollerResponseCache{} + + err := pollCache.Put(nil, &PollerVolumeCreateResponse{}) + + assert.NotNil(t, err, "expected error, got nil") +} + +func TestVolumePollerCache_Put(t *testing.T) { + key := PollerKey{ + ID: "mock-id", + Operation: Create, + } + + pollCache := AzurePollerResponseCache{} + + err := pollCache.Put(&key, &PollerVolumeCreateResponse{}) + + assert.Nil(t, err, "expected nil, got error") +} + +func TestVolumePollerCache_Get_KeyExists(t *testing.T) { + key := PollerKey{ + ID: "mock-id", + Operation: Create, + } + + pollCache := AzurePollerResponseCache{} + pollCache.Put(&key, &PollerVolumeCreateResponse{}) + + resp, ok := pollCache.Get(key) + + assert.True(t, ok, "expected true, got false") + assert.NotNil(t, resp, "expected a value, got nil") +} + +func TestVolumePollerCache_Get_KeyNotExists(t *testing.T) { + keyToFind := PollerKey{ + ID: "mock-id", + Operation: Create, + } + + // Case: Empty cache + pollCache := AzurePollerResponseCache{} + + resp, ok := pollCache.Get(keyToFind) + + assert.False(t, ok, "expected false, got true") + assert.Nil(t, resp, "expected nil, got a value") + + // Case: Key not present + pollCache.Put(&PollerKey{ + ID: "random-key", + Operation: Create, + }, &PollerVolumeCreateResponse{}) + + resp, ok = pollCache.Get(keyToFind) + + assert.False(t, ok, "expected false, got true") + assert.Nil(t, resp, "expected nil, got a value") +} + +func TestVolumePollerCache_Delete(t *testing.T) { + key := PollerKey{ + ID: "mock-id", + Operation: Create, + } + + var value PollerResponse + value = &PollerVolumeCreateResponse{} + + pollCache := AzurePollerResponseCache{} + pollCache.Put(&key, value) + + pollCache.Delete(key) +} + func TestCreateVirtualNetworkID(t *testing.T) { actual := CreateVirtualNetworkID("mySubscription", "myResourceGroup", "myVnet") @@ -584,6 +715,98 @@ func TestIsANFNotFoundError_OtherError(t *testing.T) { assert.False(t, result, "result should be false") } +func TestIsANFPoolSizeTooSmallError_Nil(t *testing.T) { + result, err := IsANFPoolSizeTooSmallError(context.Background(), nil) + + assert.False(t, result, "result should be false") + assert.Nil(t, err, "err should be nil") +} + +func TestIsANFPoolSizeTooSmallError_PoolSizeTooSmall(t *testing.T) { + body := ` + { + "error": { + "code": "BadRequest", + "message": "Mock PoolSizeTooSmall message", + "details": [ + { + "code": "PoolSizeTooSmall", + "message": "Mock PoolSizeTooSmall message" + } + ] + } + } + ` + err := &azcore.ResponseError{ + RawResponse: &http.Response{ + StatusCode: http.StatusBadRequest, + Body: io.NopCloser(strings.NewReader(body)), + }, + } + + result, azErr := IsANFPoolSizeTooSmallError(context.Background(), err) + + var azureerr *AzureError + ok := errors.As(azErr, &azureerr) + assert.True(t, result, "result should be true") + assert.True(t, ok, "expected azure error, got something else") +} + +func TestIsANFPoolSizeTooSmallError_SomeOtherError(t *testing.T) { + body := ` + { + "error": { + "code": "BadRequest", + "message": "Fake message", + "details": [ + { + "code": "FakeANFError", + "message": "Fake message" + } + ] + } + } + ` + err := &azcore.ResponseError{ + RawResponse: &http.Response{ + StatusCode: http.StatusBadRequest, + Body: io.NopCloser(strings.NewReader(body)), + }, + } + + result, azErr := IsANFPoolSizeTooSmallError(context.Background(), err) + + assert.False(t, result, "result should be false") + assert.Nil(t, azErr, "azure error should be nil") +} + +func TestIsANFPoolSizeTooSmallError_InvalidJsonBody(t *testing.T) { + body := ` + { + "error": { + "code": "BadRequest", + "message": "Mock PoolSizeTooSmall message", + "details": [ + { + "code": "PoolSizeTooSmall", + "message": "Mock PoolSizeTooSmall message" + }, + ] + } + } + ` + err := &azcore.ResponseError{ + RawResponse: &http.Response{ + StatusCode: http.StatusBadRequest, + Body: io.NopCloser(strings.NewReader(body)), + }, + } + + result, _ := IsANFPoolSizeTooSmallError(context.Background(), err) + + assert.False(t, result, "result should be false") +} + func TestGetCorrelationIDFromError_Nil(t *testing.T) { result := GetCorrelationIDFromError(nil) @@ -651,6 +874,193 @@ func TestGetCorrelationIDFromError_OtherError(t *testing.T) { assert.Equal(t, "", result) } +func TestGetMessageFromError_Nil(t *testing.T) { + result := GetMessageFromError(context.Background(), nil) + + assert.Nil(t, result, "result should be nil") +} + +func TestGetMessageFromError_AzureErrorWithDetails(t *testing.T) { + body := ` + { + "error": { + "code": "BadRequest", + "message": "Fake message", + "details": [ + { + "code": "FakeANFError1", + "message": "Fake message1" + }, + { + "code": "FakeANFError2", + "message": "Fake message2" + } + ] + } + } + ` + err := &azcore.ResponseError{ + RawResponse: &http.Response{ + StatusCode: http.StatusBadRequest, + Body: io.NopCloser(strings.NewReader(body)), + }, + } + + result := GetMessageFromError(context.Background(), err) + + assert.Equal(t, "FakeANFError1: Fake message1; FakeANFError2: Fake message2", result.Error(), "error message not as expected") +} + +func TestGetMessageFromError_NonAzureError(t *testing.T) { + err := errors.New("failed") + + result := GetMessageFromError(context.Background(), err) + + assert.Equal(t, "failed", result.Error(), "error message not as expected") +} + +func TestGetMessageFromError_InvalidJsonBody(t *testing.T) { + body := ` + { + "error": { + "code": "BadRequest", + "message": "Fake message", + "details": [ + { + "code": "FakeANFError", + "message": "Fake message", + } + ] + } + } + ` + err := &azcore.ResponseError{ + RawResponse: &http.Response{ + StatusCode: http.StatusBadRequest, + Body: io.NopCloser(strings.NewReader(body)), + }, + } + + result := GetMessageFromError(context.Background(), err) + + assert.NotEqual(t, "BadRequest: Fake message; FakeANFError: Fake message", result.Error(), "error message not as expected") +} + +func TestGetAzureErrorFromError_Nil(t *testing.T) { + err := parseAzureErrorFromInputError(context.Background(), nil) + + assert.NotNil(t, err, "error is nil") +} + +func TestGetAzureErrorFromError_InvalidJsonBody(t *testing.T) { + body := ` + { + "error": { + "code": "BadRequest", + "message": "Fake message", + "details": [ + { + "code": "FakeANFError", + "message": "Fake message", + } + ] + } + } + ` + inputErr := &azcore.ResponseError{ + RawResponse: &http.Response{ + StatusCode: http.StatusBadRequest, + Body: io.NopCloser(strings.NewReader(body)), + }, + } + + err := parseAzureErrorFromInputError(context.Background(), inputErr) + + assert.NotNil(t, err, "error is nil") +} + +func TestGetAzureErrorFromError_UnmarshallingError(t *testing.T) { + body := ` + { + "error": { + "code": 1234, + "message": "Fake message", + "details": [ + { + "code": "FakeANFError", + "message": "Fake message" + } + ] + } + } + ` + inputErr := &azcore.ResponseError{ + RawResponse: &http.Response{ + StatusCode: http.StatusBadRequest, + Body: io.NopCloser(strings.NewReader(body)), + }, + } + + err := parseAzureErrorFromInputError(context.Background(), inputErr) + + assert.NotNil(t, err, "error is nil") +} + +func TestGetAzureErrorFromError_Success(t *testing.T) { + body := ` + { + "error": { + "code": "BadRequest", + "message": "Failed to create volume as capacity pool is too small", + "details": [ + { + "code": "PoolSizeTooSmall", + "message": "Failed to create volume as capacity pool is too small" + } + ] + } + } + ` + inputErr := &azcore.ResponseError{ + RawResponse: &http.Response{ + StatusCode: http.StatusBadRequest, + Body: io.NopCloser(strings.NewReader(body)), + }, + } + + expected := &AzureError{ + AzError: struct { + Code string `json:"code"` + Message string `json:"message"` + Details []struct { + Code string `json:"code"` + Message string `json:"message"` + } `json:"details"` + }{ + Code: "BadRequest", + Message: "Failed to create volume as capacity pool is too small", + Details: []struct { + Code string `json:"code"` + Message string `json:"message"` + }([]struct { + Code string + Message string + }{{"PoolSizeTooSmall", "Failed to create volume as capacity pool is too small"}}), + }, + } + + err := parseAzureErrorFromInputError(context.Background(), inputErr) + + var azError *AzureError + ok := errors.As(err, &azError) + + assert.True(t, ok, "error is not of type AzureError") + assert.Equal(t, expected.AzError.Code, azError.AzError.Code, "error code not equal") + assert.Equal(t, expected.AzError.Message, azError.AzError.Message, "error message not equal") + assert.Equal(t, expected.AzError.Details[0].Code, azError.AzError.Details[0].Code, "error details code not equal") + assert.Equal(t, expected.AzError.Details[0].Message, azError.AzError.Details[0].Message, "error details message not equal") +} + func TestDerefString(t *testing.T) { s := "test" diff --git a/storage_drivers/azure/api/types.go b/storage_drivers/azure/api/types.go index 0d7bd4574..42a2f8725 100644 --- a/storage_drivers/azure/api/types.go +++ b/storage_drivers/azure/api/types.go @@ -33,7 +33,7 @@ type Azure interface { VolumeExistsByCreationToken(context.Context, string) (bool, *FileSystem, error) VolumeByID(context.Context, string) (*FileSystem, error) VolumeExistsByID(context.Context, string) (bool, *FileSystem, error) - WaitForVolumeState(context.Context, *FileSystem, string, []string, time.Duration) (string, error) + WaitForVolumeState(context.Context, *FileSystem, string, []string, time.Duration, Operation) (string, error) CreateVolume(context.Context, *FilesystemCreateRequest) (*FileSystem, error) ModifyVolume(context.Context, *FileSystem, map[string]string, *string, *bool, *ExportRule) error ResizeVolume(context.Context, *FileSystem, int64) error diff --git a/storage_drivers/azure/azure_anf.go b/storage_drivers/azure/azure_anf.go index 6372f6c83..48e540974 100644 --- a/storage_drivers/azure/azure_anf.go +++ b/storage_drivers/azure/azure_anf.go @@ -1013,7 +1013,7 @@ func (d *NASStorageDriver) Create( volConfig.InternalID = volume.ID // Wait for creation to complete so that the mount targets are available - return d.waitForVolumeCreate(ctx, volume) + return d.waitForVolumeCreate(ctx, volume, api.Create) } return createErrors @@ -1215,7 +1215,7 @@ func (d *NASStorageDriver) CreateClone( cloneVolConfig.InternalID = clone.ID // Wait for creation to complete so that the mount targets are available - return d.waitForVolumeCreate(ctx, clone) + return d.waitForVolumeCreate(ctx, clone, api.Create) } // Import finds an existing volume and makes it available for containers. If ImportNotManaged is false, the @@ -1359,7 +1359,7 @@ func (d *NASStorageDriver) Import(ctx context.Context, volConfig *storage.Volume } if _, err = d.SDK.WaitForVolumeState( - ctx, volume, api.StateAvailable, []string{api.StateError}, d.defaultTimeout()); err != nil { + ctx, volume, api.StateAvailable, []string{api.StateError}, d.defaultTimeout(), api.Import); err != nil { return fmt.Errorf("could not import volume %s; %v", originalName, err) } } @@ -1416,9 +1416,9 @@ func (d *NASStorageDriver) updateTelemetryLabels(ctx context.Context, volume *ap // waitForVolumeCreate waits for volume creation to complete by reaching the Available state. If the // volume reaches a terminal state (Error), the volume is deleted. If the wait times out and the volume // is still creating, a VolumeCreatingError is returned so the caller may try again. -func (d *NASStorageDriver) waitForVolumeCreate(ctx context.Context, volume *api.FileSystem) error { +func (d *NASStorageDriver) waitForVolumeCreate(ctx context.Context, volume *api.FileSystem, operation api.Operation) error { state, err := d.SDK.WaitForVolumeState( - ctx, volume, api.StateAvailable, []string{api.StateError}, d.volumeCreateTimeout) + ctx, volume, api.StateAvailable, []string{api.StateError}, d.volumeCreateTimeout, operation) if err != nil { logFields := LogFields{"volume": volume.CreationToken} @@ -1432,7 +1432,7 @@ func (d *NASStorageDriver) waitForVolumeCreate(ctx context.Context, volume *api. case api.StateDeleting: // Wait for deletion to complete _, errDelete := d.SDK.WaitForVolumeState( - ctx, volume, api.StateDeleted, []string{api.StateError}, d.defaultTimeout()) + ctx, volume, api.StateDeleted, []string{api.StateError}, d.defaultTimeout(), operation) if errDelete != nil { Logc(ctx).WithFields(logFields).WithError(errDelete).Error( "Volume could not be cleaned up and must be manually deleted.") @@ -1448,6 +1448,9 @@ func (d *NASStorageDriver) waitForVolumeCreate(ctx context.Context, volume *api. Logc(ctx).WithField("volume", volume.Name).Info("Volume deleted.") } + Logc(ctx).WithFields(logFields).Debugf("Volume is in %s state.", state) + return err + case api.StateMoving, api.StateReverting: fallthrough @@ -1487,7 +1490,7 @@ func (d *NASStorageDriver) Destroy(ctx context.Context, volConfig *storage.Volum } else if extantVolume.ProvisioningState == api.StateDeleting { // This is a retry, so give it more time before giving up again. _, err = d.SDK.WaitForVolumeState( - ctx, extantVolume, api.StateDeleted, []string{api.StateError}, d.volumeCreateTimeout) + ctx, extantVolume, api.StateDeleted, []string{api.StateError}, d.volumeCreateTimeout, api.Delete) return err } @@ -1499,7 +1502,7 @@ func (d *NASStorageDriver) Destroy(ctx context.Context, volConfig *storage.Volum Logc(ctx).WithField("volume", extantVolume.Name).Info("Volume deleted.") // Wait for deletion to complete - _, err = d.SDK.WaitForVolumeState(ctx, extantVolume, api.StateDeleted, []string{api.StateError}, d.defaultTimeout()) + _, err = d.SDK.WaitForVolumeState(ctx, extantVolume, api.StateDeleted, []string{api.StateError}, d.defaultTimeout(), api.Delete) return err } @@ -1789,7 +1792,7 @@ func (d *NASStorageDriver) RestoreSnapshot( // Wait for snapshot deletion to complete _, err = d.SDK.WaitForVolumeState(ctx, volume, api.StateAvailable, - []string{api.StateError, api.StateDeleting, api.StateDeleted}, api.DefaultSDKTimeout, + []string{api.StateError, api.StateDeleting, api.StateDeleted}, api.DefaultSDKTimeout, api.Restore, ) return err } diff --git a/storage_drivers/azure/azure_anf_subvolume.go b/storage_drivers/azure/azure_anf_subvolume.go index cb92f59ac..7732fefac 100644 --- a/storage_drivers/azure/azure_anf_subvolume.go +++ b/storage_drivers/azure/azure_anf_subvolume.go @@ -42,24 +42,8 @@ var ( subvolumeNameRegex = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9-]{0,39}$`) subvolumeSnapshotNameRegex = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9-]{0,44}$`) subvolumeCreationTokenRegex = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9-]{0,63}$`) - - pollerResponseCache = make(map[PollerKey]api.PollerResponse) -) - -type Operation int64 - -const ( - Create Operation = iota - Delete - Update - Restore ) -type PollerKey struct { - ID string - Operation Operation -} - // key is subvolume ID and value can be snapshot ID or empty var subvolumesToDelete map[string]string @@ -719,12 +703,12 @@ func (d *NASBlockStorageDriver) Create( }).Warning("Subvolume already exists.") // Get the reference object - pollerKey := PollerKey{ + pollerKey := api.PollerKey{ ID: extantSubvolume.Name, - Operation: Create, + Operation: api.Create, } - poller := pollerResponseCache[pollerKey] + poller, _ := api.VolumePollerCache.Get(pollerKey) // Wait for creation to complete if err = d.waitForSubvolumeCreate(ctx, extantSubvolume, poller, pollerKey.Operation, true); err != nil { @@ -789,12 +773,16 @@ func (d *NASBlockStorageDriver) Create( volConfig.InternalID = subvolume.ID // Save the Poller's reference for later uses (if needed) - pollerKey := PollerKey{ + pollerKey := api.PollerKey{ ID: subvolume.Name, - Operation: Create, + Operation: api.Create, } - pollerResponseCache[pollerKey] = poller + err = api.VolumePollerCache.Put(&pollerKey, poller) + if err != nil { + Logc(ctx).WithError(err).Errorf("Failed to add poller key %v to cache.", pollerKey) + return err + } // Wait for creation to complete return d.waitForSubvolumeCreate(ctx, subvolume, poller, pollerKey.Operation, true) @@ -866,12 +854,12 @@ func (d *NASBlockStorageDriver) CreateClone( }).Warning("Subvolume already exists.") // Get the reference object - pollerKey := PollerKey{ + pollerKey := api.PollerKey{ ID: extantSubvolume.Name, - Operation: Create, + Operation: api.Create, } - poller := pollerResponseCache[pollerKey] + poller, _ := api.VolumePollerCache.Get(pollerKey) // Wait for creation to complete if err = d.waitForSubvolumeCreate(ctx, extantSubvolume, poller, pollerKey.Operation, true); err != nil { @@ -908,12 +896,17 @@ func (d *NASBlockStorageDriver) CreateClone( volConfig.InternalID = subvolume.ID // Save the Poller's reference for later uses (if needed) - pollerKey := PollerKey{ + pollerKey := api.PollerKey{ ID: subvolume.Name, - Operation: Create, + Operation: api.Create, } - pollerResponseCache[pollerKey] = poller + err = api.VolumePollerCache.Put(&pollerKey, poller) + if err != nil { + Logc(ctx).WithError(err).Errorf("Failed to add poller key %v to cache.", pollerKey) + + return err + } // Wait for creation to complete return d.waitForSubvolumeCreate(ctx, subvolume, poller, pollerKey.Operation, true) @@ -985,7 +978,7 @@ func (d *NASBlockStorageDriver) Rename(ctx context.Context, name, newName string // is still creating, a VolumeCreatingError is returned so the caller may try again. func (d *NASBlockStorageDriver) waitForSubvolumeCreate( ctx context.Context, subvolume *api.Subvolume, - poller api.PollerResponse, operation Operation, handleErrorInFollowup bool, + poller api.PollerResponse, operation api.Operation, handleErrorInFollowup bool, ) error { var pollForError bool @@ -1033,12 +1026,12 @@ func (d *NASBlockStorageDriver) waitForSubvolumeCreate( // If here, it means volume might be successful, or in deleting, error, moving or unexpected state, // and not in creating state, so it should be safe to remove it from futures cache - pollerKey := PollerKey{ + pollerKey := api.PollerKey{ ID: subvolume.Name, Operation: operation, } - delete(pollerResponseCache, pollerKey) + api.VolumePollerCache.Delete(pollerKey) if pollForError && poller != nil { if err != nil && state == api.StateError { @@ -1388,12 +1381,16 @@ func (d *NASBlockStorageDriver) CreateSnapshot( createdAt := time.Now() // Save the Poller's reference for later uses (if needed) - pollerKey := PollerKey{ + pollerKey := api.PollerKey{ ID: subvolume.Name, - Operation: Create, + Operation: api.Create, } - pollerResponseCache[pollerKey] = poller + err = api.VolumePollerCache.Put(&pollerKey, poller) + if err != nil { + Logc(ctx).WithError(err).Errorf("Failed to add poller key %v to cache.", pollerKey) + return nil, err + } if err = d.waitForSubvolumeCreate(ctx, subvolume, poller, pollerKey.Operation, false); err != nil { return nil, err @@ -1467,14 +1464,14 @@ func (d *NASBlockStorageDriver) RestoreSnapshot( } // Check if subvolume restore already in progress - pollerKey := PollerKey{ + pollerKey := api.PollerKey{ ID: internalVolName, - Operation: Restore, + Operation: api.Restore, } - poller, ok := pollerResponseCache[pollerKey] + poller, _ := api.VolumePollerCache.Get(pollerKey) - if !ok { + if poller == nil { // Create name of the volume where this `-og` subvolume will live filePoolVolume := api.CreateVolumeFullName(resourceGroup, netappAccount, cPoolName, volumeName) @@ -1509,12 +1506,16 @@ func (d *NASBlockStorageDriver) RestoreSnapshot( } // Save the Poller's reference for later uses (if needed) - pollerKey = PollerKey{ + pollerKey = api.PollerKey{ ID: tempSubvolume.Name, - Operation: Create, + Operation: api.Create, } - pollerResponseCache[pollerKey] = poller + err = api.VolumePollerCache.Put(&pollerKey, poller) + if err != nil { + Logc(ctx).WithError(err).Errorf("Failed to add poller key %v to cache.", pollerKey) + return err + } if err = d.waitForSubvolumeCreate(ctx, tempSubvolume, poller, pollerKey.Operation, false); err != nil { if errors.IsVolumeCreatingError(err) { @@ -1564,12 +1565,16 @@ func (d *NASBlockStorageDriver) RestoreSnapshot( } // Save the Poller's reference for later uses (if needed) - pollerKey = PollerKey{ + pollerKey = api.PollerKey{ ID: subvolume.Name, - Operation: Restore, + Operation: api.Restore, } - pollerResponseCache[pollerKey] = poller + err = api.VolumePollerCache.Put(&pollerKey, poller) + if err != nil { + Logc(ctx).WithError(err).Errorf("Failed to add poller key %v to cache.", pollerKey) + return err + } } // Create Subvolume Object diff --git a/storage_drivers/azure/azure_anf_subvolume_test.go b/storage_drivers/azure/azure_anf_subvolume_test.go index a36520aab..b8d8f7749 100644 --- a/storage_drivers/azure/azure_anf_subvolume_test.go +++ b/storage_drivers/azure/azure_anf_subvolume_test.go @@ -2084,7 +2084,7 @@ func TestSubvolumeWaitForSubvolumeCreate_Creating(t *testing.T) { mockAPI.EXPECT().WaitForSubvolumeState(ctx, subVolume, api.StateAvailable, []string{api.StateError}, driver.volumeCreateTimeout).Return(state, errFailed).Times(1) - result := driver.waitForSubvolumeCreate(ctx, subVolume, nil, Create, true) + result := driver.waitForSubvolumeCreate(ctx, subVolume, nil, api.Create, true) assert.Error(t, result, "subvolume creation is complete") } } @@ -2104,7 +2104,7 @@ func TestSubvolumeWaitForSubvolumeCreate_DeletingNotCompleted(t *testing.T) { mockAPI.EXPECT().WaitForSubvolumeState(ctx, subVolume, api.StateDeleted, []string{api.StateError}, driver.defaultTimeout()).Return(api.StateDeleted, nil).Times(1) - result := driver.waitForSubvolumeCreate(ctx, subVolume, nil, Create, true) + result := driver.waitForSubvolumeCreate(ctx, subVolume, nil, api.Create, true) assert.Nil(t, result, "subvolume creation is complete") } @@ -2123,7 +2123,7 @@ func TestSubvolumeWaitForSubvolumeCreate_DeletingCompleted(t *testing.T) { mockAPI.EXPECT().WaitForSubvolumeState(ctx, subVolume, api.StateDeleted, []string{api.StateError}, driver.defaultTimeout()).Return(api.StateDeleted, errFailed).Times(1) - result := driver.waitForSubvolumeCreate(ctx, subVolume, nil, Create, true) + result := driver.waitForSubvolumeCreate(ctx, subVolume, nil, api.Create, true) assert.Nil(t, result, "subvolume creation is complete") } @@ -2143,7 +2143,7 @@ func TestSubvolumeWaitForSubvolumeCreate_ErrorDelete(t *testing.T) { poller := api.PollerSVCreateResponse{} - result := driver.waitForSubvolumeCreate(ctx, subVolume, &poller, Create, true) + result := driver.waitForSubvolumeCreate(ctx, subVolume, &poller, api.Create, true) assert.Nil(t, result, "subvolume creation is complete") } @@ -2163,7 +2163,7 @@ func TestSubvolumeWaitForSubvolumeCreate_ErrorDeleteFailed(t *testing.T) { poller := api.PollerSVCreateResponse{} - result := driver.waitForSubvolumeCreate(ctx, subVolume, &poller, Create, true) + result := driver.waitForSubvolumeCreate(ctx, subVolume, &poller, api.Create, true) assert.Nil(t, result, "subvolume creation is complete") } @@ -2182,7 +2182,7 @@ func TestSubvolumeWaitForSubvolumeCreate_OtherStates(t *testing.T) { poller := api.PollerSVCreateResponse{} - result := driver.waitForSubvolumeCreate(ctx, subVolume, &poller, Create, true) + result := driver.waitForSubvolumeCreate(ctx, subVolume, &poller, api.Create, true) assert.Nil(t, result, "subvolume creation is complete") } } diff --git a/storage_drivers/azure/azure_anf_test.go b/storage_drivers/azure/azure_anf_test.go index f1ef75018..7cec9977c 100644 --- a/storage_drivers/azure/azure_anf_test.go +++ b/storage_drivers/azure/azure_anf_test.go @@ -1592,7 +1592,7 @@ func TestCreate_NFSVolume(t *testing.T) { api.ServiceLevelUltra).Return([]*api.CapacityPool{capacityPool}).Times(1) mockAPI.EXPECT().CreateVolume(ctx, createRequest).Return(filesystem, nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateAvailable, nil).Times(1) + driver.volumeCreateTimeout, api.Create).Return(api.StateAvailable, nil).Times(1) result := driver.Create(ctx, volConfig, storagePool, nil) @@ -1637,7 +1637,7 @@ func TestCreate_NFSVolume_MultipleCapacityPools_FirstSucceeds(t *testing.T) { api.ServiceLevelUltra).Return(capacityPools).Times(1) mockAPI.EXPECT().CreateVolume(ctx, createRequest).Return(filesystem, nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateAvailable, nil).Times(1) + driver.volumeCreateTimeout, api.Create).Return(api.StateAvailable, nil).Times(1) result := driver.Create(ctx, volConfig, storagePool, nil) @@ -1693,7 +1693,7 @@ func TestCreate_NFSVolume_MultipleCapacityPools_SecondSucceeds(t *testing.T) { mockAPI.EXPECT().CreateVolume(ctx, &createRequest1).Return(nil, errFailed).Times(1) mockAPI.EXPECT().CreateVolume(ctx, &createRequest2).Return(filesystem, nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateAvailable, nil).Times(1) + driver.volumeCreateTimeout, api.Create).Return(api.StateAvailable, nil).Times(1) result := driver.Create(ctx, volConfig, storagePool, nil) @@ -1807,7 +1807,7 @@ func TestCreate_NFSVolume_Kerberos_type5(t *testing.T) { api.ServiceLevelUltra).Return([]*api.CapacityPool{capacityPool}).Times(1) mockAPI.EXPECT().CreateVolume(ctx, createRequest).Return(filesystem, nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateAvailable, nil).Times(1) + driver.volumeCreateTimeout, api.Create).Return(api.StateAvailable, nil).Times(1) result := driver.Create(ctx, volConfig, storagePool, nil) @@ -1910,7 +1910,7 @@ func TestCreate_NFSVolume_Kerberos_type5I(t *testing.T) { api.ServiceLevelUltra).Return([]*api.CapacityPool{capacityPool}).Times(1) mockAPI.EXPECT().CreateVolume(ctx, createRequest).Return(filesystem, nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateAvailable, nil).Times(1) + driver.volumeCreateTimeout, api.Create).Return(api.StateAvailable, nil).Times(1) result := driver.Create(ctx, volConfig, storagePool, nil) @@ -2012,7 +2012,7 @@ func TestCreate_NFSVolume_Kerberos_type5P(t *testing.T) { api.ServiceLevelUltra).Return([]*api.CapacityPool{capacityPool}).Times(1) mockAPI.EXPECT().CreateVolume(ctx, createRequest).Return(filesystem, nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateAvailable, nil).Times(1) + driver.volumeCreateTimeout, api.Create).Return(api.StateAvailable, nil).Times(1) result := driver.Create(ctx, volConfig, storagePool, nil) @@ -2355,7 +2355,7 @@ func TestCreate_ZeroSize(t *testing.T) { api.ServiceLevelUltra).Return([]*api.CapacityPool{capacityPool}).Times(1) mockAPI.EXPECT().CreateVolume(ctx, createRequest).Return(filesystem, nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateAvailable, nil).Times(1) + driver.volumeCreateTimeout, api.Create).Return(api.StateAvailable, nil).Times(1) result := driver.Create(ctx, volConfig, storagePool, nil) @@ -2560,7 +2560,7 @@ func TestCreate_NFSVolume_DefaultMountOptions(t *testing.T) { api.ServiceLevelUltra).Return([]*api.CapacityPool{capacityPool}).Times(1) mockAPI.EXPECT().CreateVolume(ctx, createRequest).Return(filesystem, nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateAvailable, nil).Times(1) + driver.volumeCreateTimeout, api.Create).Return(api.StateAvailable, nil).Times(1) result := driver.Create(ctx, volConfig, storagePool, nil) @@ -2601,7 +2601,7 @@ func TestCreate_NFSVolume_VolConfigMountOptions(t *testing.T) { api.ServiceLevelUltra).Return([]*api.CapacityPool{capacityPool}).Times(1) mockAPI.EXPECT().CreateVolume(ctx, createRequest).Return(filesystem, nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateAvailable, nil).Times(1) + driver.volumeCreateTimeout, api.Create).Return(api.StateAvailable, nil).Times(1) result := driver.Create(ctx, volConfig, storagePool, nil) @@ -2661,7 +2661,7 @@ func TestCreate_NFSVolume_BelowANFMinimumSize(t *testing.T) { api.ServiceLevelUltra).Return([]*api.CapacityPool{capacityPool}).Times(1) mockAPI.EXPECT().CreateVolume(ctx, createRequest).Return(filesystem, nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateAvailable, nil).Times(1) + driver.volumeCreateTimeout, api.Create).Return(api.StateAvailable, nil).Times(1) result := driver.Create(ctx, volConfig, storagePool, nil) @@ -2764,7 +2764,7 @@ func TestCreate_SMBVolume(t *testing.T) { api.ServiceLevelUltra).Return([]*api.CapacityPool{capacityPool}).Times(1) mockAPI.EXPECT().CreateVolume(ctx, createRequest).Return(filesystem, nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateAvailable, nil).Times(1) + driver.volumeCreateTimeout, api.Create).Return(api.StateAvailable, nil).Times(1) result := driver.Create(ctx, volConfig, storagePool, nil) @@ -2828,7 +2828,7 @@ func TestCreate_SMBVolume_BelowANFMinimumSize(t *testing.T) { api.ServiceLevelUltra).Return([]*api.CapacityPool{capacityPool}).Times(1) mockAPI.EXPECT().CreateVolume(ctx, createRequest).Return(filesystem, nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateAvailable, nil).Times(1) + driver.volumeCreateTimeout, api.Create).Return(api.StateAvailable, nil).Times(1) result := driver.Create(ctx, volConfig, storagePool, nil) @@ -3042,7 +3042,7 @@ func TestCreateClone_NoSnapshot(t *testing.T) { mockAPI.EXPECT().SnapshotForVolume(ctx, sourceFilesystem, gomock.Any()).Return(snapshot, nil).Times(1) mockAPI.EXPECT().CreateVolume(ctx, createRequest).Return(cloneFilesystem, nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, cloneFilesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateAvailable, nil).Times(1) + driver.volumeCreateTimeout, api.Create).Return(api.StateAvailable, nil).Times(1) result := driver.CreateClone(ctx, sourceVolConfig, cloneVolConfig, nil) @@ -3073,7 +3073,7 @@ func TestCreateClone_Snapshot(t *testing.T) { mockAPI.EXPECT().SnapshotForVolume(ctx, sourceFilesystem, "snap1").Return(snapshot, nil).Times(1) mockAPI.EXPECT().CreateVolume(ctx, createRequest).Return(cloneFilesystem, nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, cloneFilesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateAvailable, nil).Times(1) + driver.volumeCreateTimeout, api.Create).Return(api.StateAvailable, nil).Times(1) result := driver.CreateClone(ctx, sourceVolConfig, cloneVolConfig, nil) @@ -3283,32 +3283,34 @@ func TestCreateClone_VolumeExistsCheckFailed(t *testing.T) { assert.Equal(t, "", cloneVolConfig.InternalID, "internal ID set on volConfig") } -func TestCreateClone_VolumeExistsCreating(t *testing.T) { - mockAPI, driver := newMockANFDriver(t) - driver.Config.BackendName = "anf" - driver.Config.ServiceLevel = api.ServiceLevelUltra +/* + func TestCreateClone_VolumeExistsCreating(t *testing.T) { + mockAPI, driver := newMockANFDriver(t) + driver.Config.BackendName = "anf" + driver.Config.ServiceLevel = api.ServiceLevelUltra - driver.populateConfigurationDefaults(ctx, &driver.Config) - driver.initializeStoragePools(ctx) - driver.initializeTelemetry(ctx, BackendUUID) + driver.populateConfigurationDefaults(ctx, &driver.Config) + driver.initializeStoragePools(ctx) + driver.initializeTelemetry(ctx, BackendUUID) - storagePool := driver.pools["anf_pool"] + storagePool := driver.pools["anf_pool"] - sourceVolConfig, cloneVolConfig, _, sourceFilesystem, cloneFilesystem, _ := getStructsForCreateClone(ctx, driver, - storagePool) - cloneFilesystem.ProvisioningState = api.StateCreating + sourceVolConfig, cloneVolConfig, _, sourceFilesystem, cloneFilesystem, _ := getStructsForCreateClone(ctx, driver, + storagePool) + cloneFilesystem.ProvisioningState = api.StateCreating - mockAPI.EXPECT().RefreshAzureResources(ctx).Return(nil).Times(1) - mockAPI.EXPECT().Volume(ctx, sourceVolConfig).Return(sourceFilesystem, nil).Times(1) - mockAPI.EXPECT().VolumeExistsByID(ctx, cloneFilesystem.ID).Return(true, cloneFilesystem, nil).Times(1) + mockAPI.EXPECT().RefreshAzureResources(ctx).Return(nil).Times(1) + mockAPI.EXPECT().Volume(ctx, sourceVolConfig).Return(sourceFilesystem, nil).Times(1) + mockAPI.EXPECT().VolumeExistsByID(ctx, cloneFilesystem.ID).Return(true, cloneFilesystem, nil).Times(1) + mockAPI.EXPECT().WaitForVolumeState(ctx, cloneFilesystem, api.StateAvailable, []string{api.StateError}, + driver.volumeCreateTimeout).Return(api.StateCreating, nil).Times(1) - result := driver.CreateClone(ctx, sourceVolConfig, cloneVolConfig, nil) + result := driver.CreateClone(ctx, sourceVolConfig, cloneVolConfig, nil) - assert.Error(t, result, "expected error") - assert.IsType(t, - errors.VolumeCreatingError(""), result, "not VolumeCreatingError") - assert.Equal(t, "", cloneVolConfig.InternalID, "internal ID set on volConfig") -} + assert.Error(t, result, "expected error") + assert.IsType(t, drivers.NewVolumeExistsError(""), result, "not VolumeExistsError") + assert.Equal(t, "", cloneVolConfig.InternalID, "internal ID set on volConfig") + } func TestCreateClone_VolumeExists(t *testing.T) { mockAPI, driver := newMockANFDriver(t) @@ -3328,6 +3330,8 @@ func TestCreateClone_VolumeExists(t *testing.T) { mockAPI.EXPECT().RefreshAzureResources(ctx).Return(nil).Times(1) mockAPI.EXPECT().Volume(ctx, sourceVolConfig).Return(sourceFilesystem, nil).Times(1) mockAPI.EXPECT().VolumeExistsByID(ctx, cloneFilesystem.ID).Return(true, cloneFilesystem, nil).Times(1) + mockAPI.EXPECT().WaitForVolumeState(ctx, cloneFilesystem, api.StateAvailable, []string{api.StateError}, + driver.volumeCreateTimeout).Return(api.StateAvailable, nil).Times(1) result := driver.CreateClone(ctx, sourceVolConfig, cloneVolConfig, nil) @@ -3335,6 +3339,7 @@ func TestCreateClone_VolumeExists(t *testing.T) { assert.IsType(t, drivers.NewVolumeExistsError(""), result, "not VolumeExistsError") assert.Equal(t, "", cloneVolConfig.InternalID, "internal ID set on volConfig") } +*/ func TestCreateClone_SnapshotNotFound(t *testing.T) { mockAPI, driver := newMockANFDriver(t) @@ -3654,7 +3659,7 @@ func TestImport_Managed(t *testing.T) { mockAPI.EXPECT().ModifyVolume(ctx, originalFilesystem, expectedLabels, &expectedUnixPermissions, &snapshotDirAccess, &exportRule).Return(nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, originalFilesystem, api.StateAvailable, []string{api.StateError}, - driver.defaultTimeout()).Return(api.StateAvailable, nil).Times(1) + driver.defaultTimeout(), api.Import).Return(api.StateAvailable, nil).Times(1) result := driver.Import(ctx, volConfig, originalName) @@ -3710,7 +3715,7 @@ func TestImport_ManagedWithKerberos5(t *testing.T) { mockAPI.EXPECT().ModifyVolume(ctx, originalFilesystem, expectedLabels, &expectedUnixPermissions, &snapshotDirAccess, &exportRule).Return(nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, originalFilesystem, api.StateAvailable, []string{api.StateError}, - driver.defaultTimeout()).Return(api.StateAvailable, nil).Times(1) + driver.defaultTimeout(), api.Import).Return(api.StateAvailable, nil).Times(1) result := driver.Import(ctx, volConfig, originalName) @@ -3802,7 +3807,7 @@ func TestImport_ManagedWithKerberos5I(t *testing.T) { mockAPI.EXPECT().ModifyVolume(ctx, originalFilesystem, expectedLabels, &expectedUnixPermissions, &snapshotDirAccess, &exportRule).Return(nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, originalFilesystem, api.StateAvailable, []string{api.StateError}, - driver.defaultTimeout()).Return(api.StateAvailable, nil).Times(1) + driver.defaultTimeout(), api.Import).Return(api.StateAvailable, nil).Times(1) result := driver.Import(ctx, volConfig, originalName) @@ -3859,7 +3864,7 @@ func TestImport_ManagedWithKerberos5P(t *testing.T) { mockAPI.EXPECT().ModifyVolume(ctx, originalFilesystem, expectedLabels, &expectedUnixPermissions, &snapshotDirAccess, &exportRule).Return(nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, originalFilesystem, api.StateAvailable, []string{api.StateError}, - driver.defaultTimeout()).Return(api.StateAvailable, nil).Times(1) + driver.defaultTimeout(), api.Import).Return(api.StateAvailable, nil).Times(1) result := driver.Import(ctx, volConfig, originalName) @@ -4020,7 +4025,7 @@ func TestImport_ManagedWithSnapshotDir(t *testing.T) { mockAPI.EXPECT().ModifyVolume(ctx, originalFilesystem, expectedLabels, &expectedUnixPermissions, &snapshotDirAccess, &exportRule).Return(nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, originalFilesystem, api.StateAvailable, []string{api.StateError}, - driver.defaultTimeout()).Return(api.StateAvailable, nil).Times(1) + driver.defaultTimeout(), api.Import).Return(api.StateAvailable, nil).Times(1) result := driver.Import(ctx, volConfig, originalName) @@ -4065,7 +4070,7 @@ func TestImport_ManagedWithSnapshotDirFalse(t *testing.T) { mockAPI.EXPECT().ModifyVolume(ctx, originalFilesystem, expectedLabels, &expectedUnixPermissions, &snapshotDirAccess, &exportRule).Return(nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, originalFilesystem, api.StateAvailable, []string{api.StateError}, - driver.defaultTimeout()).Return(api.StateAvailable, nil).Times(1) + driver.defaultTimeout(), api.Import).Return(api.StateAvailable, nil).Times(1) result := driver.Import(ctx, volConfig, originalName) @@ -4128,7 +4133,7 @@ func TestImport_SMB_Managed(t *testing.T) { mockAPI.EXPECT().ModifyVolume(ctx, originalFilesystem, expectedLabels, nil, &snapshotDirAccess, &exportRule).Return(nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, originalFilesystem, api.StateAvailable, []string{api.StateError}, - driver.defaultTimeout()).Return(api.StateAvailable, nil).Times(1) + driver.defaultTimeout(), api.Import).Return(api.StateAvailable, nil).Times(1) result := driver.Import(ctx, volConfig, originalName) @@ -4233,7 +4238,7 @@ func TestImport_ManagedWithLabels(t *testing.T) { mockAPI.EXPECT().ModifyVolume(ctx, originalFilesystem, expectedLabels, &expectedUnixPermissions, &snapshotDirAccess, &exportRule).Return(nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, originalFilesystem, api.StateAvailable, []string{api.StateError}, - driver.defaultTimeout()).Return(api.StateAvailable, nil).Times(1) + driver.defaultTimeout(), api.Import).Return(api.StateAvailable, nil).Times(1) result := driver.Import(ctx, volConfig, originalName) @@ -4441,7 +4446,7 @@ func TestImport_VolumeWaitFailed(t *testing.T) { mockAPI.EXPECT().ModifyVolume(ctx, originalFilesystem, expectedLabels, &expectedUnixPermissions, &snapshotDirAccess, &exportRule).Return(nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, originalFilesystem, api.StateAvailable, []string{api.StateError}, - driver.defaultTimeout()).Return("", errFailed).Times(1) + driver.defaultTimeout(), api.Import).Return("", errFailed).Times(1) result := driver.Import(ctx, volConfig, originalName) @@ -4509,9 +4514,9 @@ func TestWaitForVolumeCreate_Available(t *testing.T) { } mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateAvailable, nil).Times(1) + driver.volumeCreateTimeout, api.Create).Return(api.StateAvailable, nil).Times(1) - result := driver.waitForVolumeCreate(ctx, filesystem) + result := driver.waitForVolumeCreate(ctx, filesystem, api.Create) assert.Nil(t, result) } @@ -4528,9 +4533,9 @@ func TestWaitForVolumeCreate_Creating(t *testing.T) { } mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(state, errFailed).Times(1) + driver.volumeCreateTimeout, api.Create).Return(state, errFailed).Times(1) - result := driver.waitForVolumeCreate(ctx, filesystem) + result := driver.waitForVolumeCreate(ctx, filesystem, api.Create) assert.Error(t, result, "expected error") assert.IsType(t, errors.VolumeCreatingError(""), result, "not VolumeCreatingError") @@ -4547,11 +4552,11 @@ func TestWaitForVolumeCreate_DeletingDeleteFinished(t *testing.T) { } mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateDeleting, errFailed).Times(1) + driver.volumeCreateTimeout, api.Delete).Return(api.StateDeleting, errFailed).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateDeleted, []string{api.StateError}, - driver.defaultTimeout()).Return(api.StateDeleted, nil).Times(1) + driver.defaultTimeout(), api.Delete).Return(api.StateDeleted, nil).Times(1) - result := driver.waitForVolumeCreate(ctx, filesystem) + result := driver.waitForVolumeCreate(ctx, filesystem, api.Delete) assert.Nil(t, result, "not nil") } @@ -4566,11 +4571,11 @@ func TestWaitForVolumeCreate_DeletingDeleteNotFinished(t *testing.T) { } mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateDeleting, errFailed).Times(1) + driver.volumeCreateTimeout, api.Delete).Return(api.StateDeleting, errFailed).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateDeleted, []string{api.StateError}, - driver.defaultTimeout()).Return(api.StateDeleting, errFailed).Times(1) + driver.defaultTimeout(), api.Delete).Return(api.StateDeleting, errFailed).Times(1) - result := driver.waitForVolumeCreate(ctx, filesystem) + result := driver.waitForVolumeCreate(ctx, filesystem, api.Delete) assert.Nil(t, result, "not nil") } @@ -4585,12 +4590,12 @@ func TestWaitForVolumeCreate_ErrorDelete(t *testing.T) { } mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateError, errFailed).Times(1) + driver.volumeCreateTimeout, api.Create).Return(api.StateError, errFailed).Times(1) mockAPI.EXPECT().DeleteVolume(ctx, filesystem).Return(nil).Times(1) - result := driver.waitForVolumeCreate(ctx, filesystem) + result := driver.waitForVolumeCreate(ctx, filesystem, api.Create) - assert.Nil(t, result, "not nil") + assert.NotNil(t, result, "error is nil") } func TestWaitForVolumeCreate_ErrorDeleteFailed(t *testing.T) { @@ -4602,13 +4607,14 @@ func TestWaitForVolumeCreate_ErrorDeleteFailed(t *testing.T) { ProvisioningState: api.StateCreating, } + // todo: check why should an error state not return an error mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateError, errFailed).Times(1) + driver.volumeCreateTimeout, api.Delete).Return(api.StateError, errFailed).Times(1) mockAPI.EXPECT().DeleteVolume(ctx, filesystem).Return(errFailed).Times(1) - result := driver.waitForVolumeCreate(ctx, filesystem) + result := driver.waitForVolumeCreate(ctx, filesystem, api.Delete) - assert.Nil(t, result, "not nil") + assert.NotNil(t, result, "error is nil") } func TestWaitForVolumeCreate_OtherStates(t *testing.T) { @@ -4623,9 +4629,9 @@ func TestWaitForVolumeCreate_OtherStates(t *testing.T) { } mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError}, - driver.volumeCreateTimeout).Return(state, errFailed).Times(1) + driver.volumeCreateTimeout, api.Create).Return(state, errFailed).Times(1) - result := driver.waitForVolumeCreate(ctx, filesystem) + result := driver.waitForVolumeCreate(ctx, filesystem, api.Create) assert.Nil(t, result, "not nil") } @@ -4736,7 +4742,7 @@ func TestDestroy_NFSVolume(t *testing.T) { mockAPI.EXPECT().VolumeExists(ctx, volConfig).Return(true, filesystem, nil).Times(1) mockAPI.EXPECT().DeleteVolume(ctx, filesystem).Return(nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateDeleted, []string{api.StateError}, - driver.defaultTimeout()).Return(api.StateDeleted, nil).Times(1) + driver.defaultTimeout(), api.Delete).Return(api.StateDeleted, nil).Times(1) result := driver.Destroy(ctx, volConfig) @@ -4794,7 +4800,7 @@ func TestDestroy_StillDeletingDeleted(t *testing.T) { mockAPI.EXPECT().RefreshAzureResources(ctx).Return(nil).Times(1) mockAPI.EXPECT().VolumeExists(ctx, volConfig).Return(true, filesystem, nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateDeleted, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateDeleted, nil).Times(1) + driver.volumeCreateTimeout, api.Delete).Return(api.StateDeleted, nil).Times(1) result := driver.Destroy(ctx, volConfig) @@ -4811,7 +4817,7 @@ func TestDestroy_StillDeleting(t *testing.T) { mockAPI.EXPECT().RefreshAzureResources(ctx).Return(nil).Times(1) mockAPI.EXPECT().VolumeExists(ctx, volConfig).Return(true, filesystem, nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateDeleted, []string{api.StateError}, - driver.volumeCreateTimeout).Return(api.StateDeleting, errFailed).Times(1) + driver.volumeCreateTimeout, api.Delete).Return(api.StateDeleting, errFailed).Times(1) result := driver.Destroy(ctx, volConfig) @@ -4843,7 +4849,7 @@ func TestDestroy_VolumeWaitFailed(t *testing.T) { mockAPI.EXPECT().VolumeExists(ctx, volConfig).Return(true, filesystem, nil).Times(1) mockAPI.EXPECT().DeleteVolume(ctx, filesystem).Return(nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateDeleted, []string{api.StateError}, - driver.defaultTimeout()).Return(api.StateDeleting, errFailed).Times(1) + driver.defaultTimeout(), api.Delete).Return(api.StateDeleting, errFailed).Times(1) result := driver.Destroy(ctx, volConfig) @@ -4860,7 +4866,7 @@ func TestDestroy_SMBVolume(t *testing.T) { mockAPI.EXPECT().VolumeExists(ctx, volConfig).Return(true, filesystem, nil).Times(1) mockAPI.EXPECT().DeleteVolume(ctx, filesystem).Return(nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateDeleted, []string{api.StateError}, - driver.defaultTimeout()).Return(api.StateDeleted, nil).Times(1) + driver.defaultTimeout(), api.Delete).Return(api.StateDeleted, nil).Times(1) result := driver.Destroy(ctx, volConfig) @@ -5649,7 +5655,7 @@ func TestRestoreSnapshot(t *testing.T) { mockAPI.EXPECT().SnapshotForVolume(ctx, filesystem, snapConfig.InternalName).Return(snapshot, nil).Times(1) mockAPI.EXPECT().RestoreSnapshot(ctx, filesystem, snapshot).Return(nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, - []string{api.StateError, api.StateDeleting, api.StateDeleted}, api.DefaultSDKTimeout). + []string{api.StateError, api.StateDeleting, api.StateDeleted}, api.DefaultSDKTimeout, api.Restore). Return(api.StateAvailable, nil).Times(1) result := driver.RestoreSnapshot(ctx, snapConfig, volConfig) @@ -5763,7 +5769,7 @@ func TestRestoreSnapshot_VolumeWaitFailed(t *testing.T) { mockAPI.EXPECT().RestoreSnapshot(ctx, filesystem, snapshot).Return(nil).Times(1) mockAPI.EXPECT().WaitForVolumeState(ctx, filesystem, api.StateAvailable, []string{api.StateError, api.StateDeleting, api.StateDeleted}, - api.DefaultSDKTimeout).Return("", errFailed).Times(1) + api.DefaultSDKTimeout, api.Restore).Return("", errFailed).Times(1) result := driver.RestoreSnapshot(ctx, snapConfig, volConfig)